"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case__ : Union[str, Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case__ : Union[str, Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class snake_case_:
__UpperCamelCase = 42
__UpperCamelCase = 42
class snake_case_:
def __init__( self : List[str] , UpperCamelCase_ : Iterable[int] ):
lowerCAmelCase : Node | None = None
for i in sorted(UpperCamelCase_ , reverse=UpperCamelCase_ ):
lowerCAmelCase : Dict = Node(UpperCamelCase_ , self.head )
def __iter__( self : Optional[int] ):
lowerCAmelCase : Optional[int] = self.head
while node:
yield node.data
lowerCAmelCase : List[str] = node.next_node
def __len__( self : Optional[int] ):
return sum(1 for _ in self )
def __str__( self : List[str] ):
return " -> ".join([str(UpperCamelCase_ ) for node in self] )
def _snake_case ( _snake_case : SortedLinkedList , _snake_case : SortedLinkedList ):
return SortedLinkedList(list(_snake_case ) + list(_snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
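# Expected output of the demo above (an editorial note, derived from the two
# test tuples at the top of the file): the merged list is
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10
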
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_( a__ ):
pass
class snake_case_:
def __init__( self : Any , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = data
lowerCAmelCase : Node | None = None
def __iter__( self : int ):
lowerCAmelCase : Any = self
lowerCAmelCase : Union[str, Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase_ )
yield node.data
lowerCAmelCase : Optional[int] = node.next_node
@property
def lowerCamelCase__ ( self : str ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
snake_case__ : Dict = Node(1)
snake_case__ : Any = Node(2)
snake_case__ : int = Node(3)
snake_case__ : Any = Node(4)
print(root_node.has_loop) # False
snake_case__ : Tuple = root_node.next_node
print(root_node.has_loop) # True
snake_case__ : List[Any] = Node(5)
snake_case__ : int = Node(6)
snake_case__ : List[Any] = Node(5)
snake_case__ : Dict = Node(6)
print(root_node.has_loop) # False
snake_case__ : Any = Node(1)
print(root_node.has_loop) # False
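# The detector above remembers every visited node, so it costs O(n) memory and
# O(n^2) time (the `in visited` scan is linear). Below is a minimal sketch of
# Floyd's tortoise-and-hare cycle detection, an O(1)-memory alternative; this
# helper is an editorial addition, not part of the original file:
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node  # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False
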
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class snake_case_( a__ ):
__UpperCamelCase = ['''image_processor''', '''tokenizer''']
__UpperCamelCase = '''BlipImageProcessor'''
__UpperCamelCase = '''AutoTokenizer'''
def __init__( self : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
# add QFormer tokenizer
lowerCAmelCase : str = qformer_tokenizer
def __call__( self : Optional[Any] , UpperCamelCase_ : ImageInput = None , UpperCamelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 0 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : str , ):
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
lowerCAmelCase : Optional[Any] = BatchFeature()
if text is not None:
lowerCAmelCase : int = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
encoding.update(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.qformer_tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : int = qformer_text_encoding.pop('''input_ids''' )
lowerCAmelCase : List[Any] = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
lowerCAmelCase : str = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ )
encoding.update(UpperCamelCase_ )
return encoding
def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[Any] ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Any = self.tokenizer.model_input_names
lowerCAmelCase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , **UpperCamelCase_ : Optional[int] ):
if os.path.isfile(UpperCamelCase_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowerCAmelCase : Tuple = os.path.join(UpperCamelCase_ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(UpperCamelCase_ )
return super().save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCamelCase__ ( cls : Optional[Any] , UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , subfolder='''qformer_tokenizer''' )
lowerCAmelCase : Union[str, Any] = cls._get_arguments_from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
args.append(UpperCamelCase_ )
return cls(*UpperCamelCase_ )
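# A minimal usage sketch (an editorial addition; the checkpoint name and image
# path are illustrative assumptions, not part of this file). One processor call
# produces pixel_values, input_ids/attention_mask for the language model, and
# qformer_input_ids/qformer_attention_mask for the Q-Former. The guard keeps it
# from running on import.
if __name__ == "__main__":
    from PIL import Image

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    inputs = processor(images=Image.open("photo.jpg"), text="What is shown here?", return_tensors="pt")
    print(sorted(inputs.keys()))
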
"""simple docstring"""
from torch import nn
class snake_case_( nn.Module ):
def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ):
super().__init__()
lowerCAmelCase : str = class_size
lowerCAmelCase : Dict = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCAmelCase : int = self.mlp(UpperCamelCase_ )
return logits
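# A minimal sketch (an editorial addition) of how this head is typically used:
# it maps a hidden state of width `embed_size` to `class_size` logits, so it
# can sit on top of any encoder output. Guarded so it never runs on import.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden = torch.randn(2, 768)  # (batch, embed_size), e.g. a pooled encoder output
    logits = head(hidden)  # (batch, class_size)
    print(logits.shape)  # torch.Size([2, 5])
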
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def _snake_case ( _snake_case : str , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Optional[Any] ):
lowerCAmelCase : Optional[int] = sorted(zip(_snake_case , _snake_case ) , key=lambda _snake_case : x[0] / x[1] , reverse=_snake_case )
lowerCAmelCase : Tuple = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase : Optional[Any] = list(accumulate(_snake_case ) )
lowerCAmelCase : str = bisect(_snake_case , _snake_case )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
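# Worked example (an editorial addition): with values [60, 100, 120], weights
# [10, 20, 30] and capacity 50, the greedy order by value/weight is 6.0, 5.0,
# 4.0. The first two items fit whole (weight 30, value 160), and 20/30 of the
# last item adds 80, giving 240.0.
if __name__ == "__main__":
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0
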
"""simple docstring"""
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Dict = val
lowerCAmelCase : str = None
lowerCAmelCase : Dict = None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ):
if self.val:
if val < self.val:
if self.left is None:
lowerCAmelCase : int = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowerCAmelCase : Any = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = val
def _snake_case ( _snake_case : Tuple , _snake_case : str ):
# Recursive traversal
if root:
inorder(root.left , _snake_case )
res.append(root.val )
inorder(root.right , _snake_case )
def _snake_case ( _snake_case : Optional[Any] ):
# Build BST
if len(_snake_case ) == 0:
return arr
lowerCAmelCase : Optional[Any] = Node(arr[0] )
for i in range(1 , len(_snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowerCAmelCase : Optional[int] = []
inorder(_snake_case , _snake_case )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
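# Note (an editorial addition): insertion into this unbalanced BST is O(log n)
# on average but O(n) on already-sorted input, so tree sort degrades to O(n^2)
# in the worst case. A quick check that the demo output is sorted:
if __name__ == "__main__":
    assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]
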
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case_( a__ ):
__UpperCamelCase = '''levit'''
def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Tuple = image_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[int] = kernel_size
lowerCAmelCase : Dict = stride
lowerCAmelCase : List[Any] = padding
lowerCAmelCase : Dict = hidden_sizes
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Tuple = depths
lowerCAmelCase : Dict = key_dim
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Tuple = attention_ratio
lowerCAmelCase : Optional[int] = mlp_ratio
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : List[str] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class snake_case_( a__ ):
__UpperCamelCase = version.parse('''1.11''' )
@property
def lowerCamelCase__ ( self : Tuple ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return 1E-4
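# A minimal sketch (an editorial addition): the config is a plain container,
# so it can be built directly and individual fields overridden by keyword.
# Guarded so it never runs on import.
if __name__ == "__main__":
    config = LevitConfig(image_size=256)
    print(config.model_type, config.image_size, config.down_ops)
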
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
lowerCAmelCase : str = 3
lowerCAmelCase : Tuple = 2_5_0
lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
lowerCAmelCase : Union[str, Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Dict = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : str ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
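# A minimal sketch (an editorial addition) of the API these tests exercise: a
# StoppingCriteriaList is itself a callable that fires once any member
# criterion is met, and `model.generate(..., stopping_criteria=criteria)`
# accepts one. Guarded so it never runs on import.
if __name__ == "__main__":
    criteria = StoppingCriteriaList(
        [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)]
    )
    # e.g. model.generate(input_ids, stopping_criteria=criteria)
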
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class snake_case_( unittest.TestCase ):
__UpperCamelCase = inspect.getfile(accelerate.test_utils )
__UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCamelCase = ['''accelerate''', '''launch''']
__UpperCamelCase = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCamelCase = '''default_config.yaml'''
__UpperCamelCase = config_folder / config_file
__UpperCamelCase = config_folder / '''_default_config.yaml'''
__UpperCamelCase = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase__ ( cls : Dict ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase__ ( self : int ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=UpperCamelCase_ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(UpperCamelCase_ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase__ ( self : List[str] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class snake_case_( unittest.TestCase ):
__UpperCamelCase = '''test-tpu'''
__UpperCamelCase = '''us-central1-a'''
__UpperCamelCase = '''ls'''
__UpperCamelCase = ['''accelerate''', '''tpu-config''']
__UpperCamelCase = '''cd /usr/share'''
__UpperCamelCase = '''tests/test_samples/test_command_file.sh'''
__UpperCamelCase = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Dict = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=UpperCamelCase_ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : int = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = None
def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCAmelCase : List[Any] = []
for i in range(_snake_case ):
lowerCAmelCase : int = i / num_diffusion_timesteps
lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
class snake_case_( a__ , a__ ):
@register_to_config
def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ )
lowerCAmelCase : str = 1.0 - self.betas
lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
lowerCAmelCase : Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowerCAmelCase : Any = 1.0
# setable values
lowerCAmelCase : Any = None
lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() )
lowerCAmelCase : List[str] = variance_type
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ):
return sample
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ):
lowerCAmelCase : Any = num_inference_steps
lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ):
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : int = self.alphas_cumprod[t]
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : Tuple = self.betas[t]
else:
lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) )
lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase : Optional[Any] = variance.log()
lowerCAmelCase : Union[str, Any] = beta.log()
lowerCAmelCase : Dict = (predicted_variance + 1) / 2
lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase : Optional[int] = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : int = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : List[Any] = self.betas[t]
lowerCAmelCase : Optional[int] = self.alphas[t]
else:
lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : Tuple = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Dict = torch.clamp(
UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase : int = 0
if t > 0:
lowerCAmelCase : Union[str, Any] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device )
lowerCAmelCase : Any = self._get_variance(
UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase : str = variance
elif self.variance_type == "learned_range":
lowerCAmelCase : Optional[Any] = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
lowerCAmelCase : List[Any] = variance * variance_noise
lowerCAmelCase : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase : int = timesteps.to(original_samples.device )
lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase : str = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
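# A minimal denoising-loop sketch (an editorial addition): `model` is a
# placeholder for any network that predicts noise with the same shape as its
# input, and the step/shape choices are illustrative assumptions. It mirrors
# how `set_timesteps`/`step` are meant to be driven, and is guarded so it never
# runs on import.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 64, 64)

    def model(x, t):  # hypothetical epsilon-predictor, stands in for a real network
        return torch.randn_like(x)

    for t in scheduler.timesteps:
        noise_pred = model(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
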
"""simple docstring"""
snake_case__ : Any = [
(1_000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
while place < len(_snake_case ):
if (place + 1 < len(_snake_case )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def _snake_case ( _snake_case : int ):
lowerCAmelCase : Dict = []
for arabic, roman in ROMAN:
(lowerCAmelCase) : List[Any] = divmod(_snake_case , _snake_case )
result.append(roman * factor )
if number == 0:
break
return "".join(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
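# Round-trip sanity check (an editorial addition): converting to Roman and
# back should be the identity on 1..3999, the range classical numerals cover.
if __name__ == "__main__":
    assert all(roman_to_int(int_to_roman(i)) == i for i in range(1, 4000))
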
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case_:
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[int] = num_choices
lowerCAmelCase : Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : int = None
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
lowerCAmelCase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
lowerCAmelCase : str = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
),
) : Tuple = config_and_inputs
lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = LlamaModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase : str = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[str] = 3
lowerCAmelCase : List[str] = input_dict['''input_ids''']
lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : int = '''single_label_classification'''
lowerCAmelCase : Tuple = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : Dict = '''multi_label_classification'''
lowerCAmelCase : Union[str, Any] = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0}
lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
@require_torch
class snake_case_( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase : int = '''Simply put, the theory of relativity states that '''
lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
"""simple docstring"""
def _snake_case ( _snake_case : int ):
if not isinstance(_snake_case , _snake_case ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
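# A perfect number equals its aliquot sum; a quick check (an editorial
# addition) against the first few perfect numbers:
if __name__ == "__main__":
    for n in (6, 28, 496):
        assert aliquot_sum(n) == n  # 6 = 1+2+3, 28 = 1+2+4+7+14, 496 likewise
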
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ):
lowerCAmelCase : Dict = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ):
lowerCAmelCase : Optional[int] = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' )
torch.save(scheduler.state_dict() , _snake_case )
lowerCAmelCase : List[Any] = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : List[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : Optional[int] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Any = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , )
for _ in range(1_0_0_0 ):
lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class snake_case_( unittest.TestCase ):
__UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
__UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
__UpperCamelCase = 10
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
lowerCAmelCase : Optional[Any] = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data
lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps )
self.assertListAlmostEqual(
UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule
lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' )
class snake_case_:
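    # Wraps each lr lambda in a picklable callable so the schedule can be
    # serialized with torch.save and restored via load_state_dict.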
def __init__( self : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Tuple = fn
def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ):
return self.fn(*UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
| 637
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
snake_case__ : Optional[int] = None
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Dict = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Optional[int] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
snake_case__ : Any = {
'''google/rembert''': 256,
}
snake_case__ : List[str] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = RemBertTokenizer
def __init__( self : List[str] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[Any]="[CLS]" , UpperCamelCase_ : Dict="[SEP]" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : str="[SEP]" , UpperCamelCase_ : Optional[Any]="<pad>" , UpperCamelCase_ : List[str]="[CLS]" , UpperCamelCase_ : Optional[int]="[MASK]" , **UpperCamelCase_ : Dict , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = do_lower_case
lowerCAmelCase : Any = remove_space
lowerCAmelCase : Optional[Any] = keep_accents
lowerCAmelCase : Any = vocab_file
lowerCAmelCase : Optional[Any] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
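        # Build model inputs by adding special tokens: [CLS] A [SEP] for a single
        # sequence, [CLS] A [SEP] B [SEP] for a pair of sequences.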
lowerCAmelCase : int = [self.sep_token_id]
lowerCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : List[str] = [self.sep_token_id]
lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(UpperCamelCase_ ) )
return
lowerCAmelCase : str = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 710
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
__UpperCamelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase = '''summarizer'''
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = ['''text''']
__UpperCamelCase = ['''text''']
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
return self.model.generate(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
| 637
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case__ : Union[str, Any] = random.Random()
if is_torch_available():
import torch
def _snake_case ( _snake_case : str , _snake_case : List[str]=1.0 , _snake_case : int=None , _snake_case : Optional[int]=None ) -> int:
if rng is None:
lowerCAmelCase : int = global_rng
lowerCAmelCase : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class snake_case_( unittest.TestCase ):
def __init__( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : List[Any]=4_0_0 , UpperCamelCase_ : Any=2_0_0_0 , UpperCamelCase_ : Optional[Any]=1 , UpperCamelCase_ : Dict=0.0 , UpperCamelCase_ : List[Any]=1_6_0_0_0 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[Any]=True , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Dict = batch_size
lowerCAmelCase : str = min_seq_length
lowerCAmelCase : List[Any] = max_seq_length
lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase : List[str] = feature_size
lowerCAmelCase : Tuple = padding_value
lowerCAmelCase : Union[str, Any] = sampling_rate
lowerCAmelCase : str = return_attention_mask
lowerCAmelCase : Optional[int] = do_normalize
def lowerCamelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str=False , UpperCamelCase_ : int=False ):
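        # Build a batch of float speech inputs; unless equal_length is set, the
        # sequence lengths increase across the batch to exercise padding.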
def _flatten(UpperCamelCase_ : Tuple ):
return list(itertools.chain(*UpperCamelCase_ ) )
if equal_length:
lowerCAmelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase : List[Any] = [np.asarray(UpperCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = ASTFeatureExtractor
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = ASTFeatureExtractionTester(self )
def lowerCamelCase__ ( self : Optional[int] ):
 # Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase : str = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase : int = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
lowerCAmelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# Test batched
lowerCAmelCase : int = feat_extract(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''np''' ).input_values
lowerCAmelCase : Union[str, Any] = feat_extract(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase : str = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase : Optional[int] = np.asarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
lowerCAmelCase : int = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
import torch
lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Any = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase : Optional[int] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] ):
from datasets import load_dataset
lowerCAmelCase : Union[str, Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowerCAmelCase : Tuple = ds.sort('''id''' ).select(range(UpperCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def lowerCamelCase__ ( self : List[str] ):
# fmt: off
lowerCAmelCase : List[str] = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase : Union[str, Any] = self._load_datasamples(1 )
lowerCAmelCase : Tuple = ASTFeatureExtractor()
lowerCAmelCase : Optional[Any] = feature_extractor(UpperCamelCase_ , return_tensors='''pt''' ).input_values
 self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCamelCase_ , atol=1E-4 ) )
| 711
|
"""simple docstring"""
snake_case__ : List[Any] = '''Tobias Carryer'''
from time import time
class snake_case_:
def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008
lowerCAmelCase : str = multiplier
lowerCAmelCase : Optional[int] = increment
lowerCAmelCase : Optional[Any] = modulo
lowerCAmelCase : Optional[Any] = seed
def lowerCamelCase__ ( self : Union[str, Any] ):
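        # Linear congruential recurrence: seed = (multiplier * seed + increment) mod modulo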
lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 637
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
snake_case__ : str = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class snake_case_( a__ ):
__UpperCamelCase = '''retribert'''
def __init__( self : int , UpperCamelCase_ : Union[str, Any]=3_0_5_2_2 , UpperCamelCase_ : List[Any]=7_6_8 , UpperCamelCase_ : Union[str, Any]=8 , UpperCamelCase_ : str=1_2 , UpperCamelCase_ : Union[str, Any]=3_0_7_2 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : str=5_1_2 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=1_2_8 , UpperCamelCase_ : int=0 , **UpperCamelCase_ : Dict , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : str = layer_norm_eps
lowerCAmelCase : Union[str, Any] = share_encoders
lowerCAmelCase : Any = projection_dim
| 712
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Any = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : int = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
snake_case__ : Optional[Any] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BigBirdTokenizer
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = []
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 637
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = KandinskyVaaPriorPipeline
__UpperCamelCase = ['''prompt''']
__UpperCamelCase = ['''prompt''', '''negative_prompt''']
__UpperCamelCase = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__UpperCamelCase = False
@property
def lowerCamelCase__ ( self : List[Any] ):
return 3_2
@property
def lowerCamelCase__ ( self : str ):
return 3_2
@property
def lowerCamelCase__ ( self : Tuple ):
return self.time_input_dim
@property
def lowerCamelCase__ ( self : Any ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : int ):
return 1_0_0
@property
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCamelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(UpperCamelCase_ )
@property
def lowerCamelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase : Any = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_2,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
lowerCAmelCase : List[Any] = PriorTransformer(**UpperCamelCase_ )
 # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
lowerCAmelCase : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowerCAmelCase : Union[str, Any] = CLIPVisionModelWithProjection(UpperCamelCase_ )
return model
@property
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Dict = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = self.dummy_prior
lowerCAmelCase : List[str] = self.dummy_image_encoder
lowerCAmelCase : Tuple = self.dummy_text_encoder
lowerCAmelCase : List[Any] = self.dummy_tokenizer
lowerCAmelCase : List[str] = self.dummy_image_processor
lowerCAmelCase : Optional[int] = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=UpperCamelCase_ , clip_sample_range=10.0 , )
lowerCAmelCase : int = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple=0 ):
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase : Dict = torch.manual_seed(UpperCamelCase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCAmelCase : str = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = '''cpu'''
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : List[Any] = self.pipeline_class(**UpperCamelCase_ )
lowerCAmelCase : List[str] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Any = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowerCAmelCase : Tuple = output.image_embeds
lowerCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
lowerCAmelCase : Dict = image[0, -1_0:]
lowerCAmelCase : str = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowerCAmelCase : List[str] = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Optional[Any] = torch_device == '''cpu'''
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , test_mean_pixel_difference=UpperCamelCase_ , )
@skip_mps
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = torch_device == '''cpu'''
lowerCAmelCase : Tuple = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase_ , test_mean_pixel_difference=UpperCamelCase_ , )
| 713
|
"""simple docstring"""
 # Using DFS to find an Eulerian path traversal
def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ):
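    # Recursive DFS that marks each edge (u, v) as visited in both directions
    # and appends vertices to the path as it walks the Eulerian traversal.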
lowerCAmelCase : Any = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True
lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ):
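    # Count vertices of odd degree: 0 odd vertices means an Euler circuit exists (returns 1),
    # exactly 2 means an Euler path exists (returns 2), anything else means neither (returns 3).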
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[Any] = -1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCAmelCase : Optional[Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ):
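    # Classify the graph, pick a start node (an odd-degree vertex for a path,
    # node 1 for a circuit), then print the DFS traversal.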
lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
lowerCAmelCase : Dict = 1
if check == 2:
lowerCAmelCase : int = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def _snake_case ( ):
lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCAmelCase : Any = {
1: [],
2: []
 # all degrees are zero
}
lowerCAmelCase : List[str] = 10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
| 637
| 0
|
"""simple docstring"""
snake_case__ : List[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
snake_case__ : Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
snake_case__ : Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 714
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
 # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer,
 # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class snake_case_( a__ ):
__UpperCamelCase = False
class snake_case_( a__ ):
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 637
| 0
|
"""simple docstring"""
import random
def _snake_case ( _snake_case : int , _snake_case : float , _snake_case : bool = False ):
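    # Erdős–Rényi style G(n, p) generator: every possible edge is included
    # independently with the given probability; directed controls edge symmetry.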
lowerCAmelCase : dict = {i: [] for i in range(_snake_case )}
 # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(_snake_case )
 # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
 # for each pair of nodes (i, j), add an edge from i to j
 # if the randomly generated number is lower than the probability
for i in range(_snake_case ):
for j in range(i + 1 , _snake_case ):
if random.random() < probability:
graph[i].append(_snake_case )
if not directed:
 # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_snake_case )
return graph
def _snake_case ( _snake_case : int ):
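    # Complete graph on n vertices: every node is adjacent to every other node.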
return {
i: [j for j in range(_snake_case ) if i != j] for i in range(_snake_case )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case__ : List[Any] = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _snake_case ( ):
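    # Build a reversible map from the 256 byte values to printable unicode
    # characters, so BPE can operate on visible symbols instead of raw bytes.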
lowerCAmelCase : Optional[int] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCAmelCase : str = bs[:]
lowerCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
lowerCAmelCase : int = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case , _snake_case ) )
def _snake_case ( _snake_case : List[Any] ):
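    # Return the set of adjacent symbol pairs in a word, e.g. ("h", "e"), ("e", "l"), ...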
lowerCAmelCase : List[str] = set()
lowerCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Optional[Any] = char
return pairs
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : Any = json.load(UpperCamelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase : List[Any] = bytes_to_unicode()
lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
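        # Byte-pair encoding: repeatedly merge the adjacent pair with the lowest
        # merge rank until no known pair remains; results are memoized in self.cache.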
if token in self.cache:
return self.cache[token]
lowerCAmelCase : List[str] = tuple(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase, lowerCAmelCase : Any = bigram
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = 0
while i < len(UpperCamelCase_ ):
try:
lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Tuple = tuple(UpperCamelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ )
lowerCAmelCase : List[str] = word
return word
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Dict = []
for token in re.findall(self.pat , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ):
return self.decoder.get(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
lowerCAmelCase : Optional[int] = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase : Tuple = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
lowerCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
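
# ---------------------------------------------------------------------------
# Illustration (not part of the original file): a minimal, self-contained
# sketch of the greedy merge loop that `bpe()` above implements. The toy merge
# table passed to it is hypothetical; real tokenizers load ranks from a
# merges file.
def toy_bpe(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        # pick the adjacent pair with the lowest (i.e. earliest learned) rank
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)


# "l"+"o" merges first (rank 0), then "lo"+"w" (rank 1): "lower" -> "low er"
assert toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}) == "low er"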
"""simple docstring"""
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""")
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
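    # Worked example (not part of the original file): two yearly cash flows
    # of 100 at a 10% discount rate are worth 100/1.1**0 + 100/1.1**1,
    # i.e. about 190.91 today.
    assert present_value(0.10, [100, 100]) == 190.91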
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
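
# Illustration (not part of the original file): a minimal exact-match /
# token-level F1 computation in the spirit of the SQuAD metric, on toy
# strings. Real scoring also normalizes answers (casing, punctuation,
# articles), which is omitted here.
def _toy_squad_scores(prediction: str, reference: str) -> tuple[float, float]:
    exact_match = float(prediction == reference)
    pred_tokens, ref_tokens = prediction.split(), reference.split()
    common = sum(min(pred_tokens.count(t), ref_tokens.count(t)) for t in set(pred_tokens))
    if common == 0:
        return exact_match, 0.0
    precision = common / len(pred_tokens)
    recall = common / len(ref_tokens)
    return exact_match, 2 * precision * recall / (precision + recall)


assert _toy_squad_scores("in 1976", "in 1976") == (1.0, 1.0)
assert _toy_squad_scores("1976", "in 1976")[1] == 2 * (1 / 1) * (1 / 2) / (1 / 1 + 1 / 2)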
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roberta_fast'''] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roberta'''] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_roberta'''] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roberta'''] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
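
# ---------------------------------------------------------------------------
# Illustration (not part of the original file): a minimal sketch of the lazy
# import pattern that `_LazyModule` implements above -- attributes are
# resolved to real imports only on first access. This toy class is an
# assumption-laden simplification, not the transformers implementation.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module[attr]  # real code raises AttributeError here
        module = importlib.import_module(module_name)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


_lazy_json = _ToyLazyModule("toy", {"json": ["dumps", "loads"]})
assert _lazy_json.dumps({"a": 1}) == '{"a": 1}'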
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
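
# ---------------------------------------------------------------------------
# Illustration (not part of the original file): a minimal numpy sketch of
# what `shift_tokens_right` does for seq2seq training -- the decoder input
# is the target shifted one step right, starting with the decoder start
# token. The -100 replacement mirrors the label-masking convention used by
# the Flax implementations of this helper (an assumption, not the tested
# code itself).
def shift_tokens_right_np(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # any masked label (-100) that was shifted in becomes padding
    shifted = np.where(shifted == -100, pad_token_id, shifted)
    return shifted


_ids = np.array([[71, 82, 18, 2]])
assert shift_tokens_right_np(_ids, pad_token_id=1, decoder_start_token_id=2).tolist() == [[2, 71, 82, 18]]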
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
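
# ---------------------------------------------------------------------------
# Illustration (not part of the original file): the "prompt:weight" parsing
# convention handled by `process_prompts` above, as a standalone function
# without the torch tensor conversion.
def parse_weighted_prompts(prompts: str) -> dict:
    processed, weights = [], []
    for prompt in [p.strip() for p in prompts.split("|")]:
        if ":" in prompt:
            text, weight = prompt.split(":")
            processed.append(text)
            weights.append(float(weight))
        else:
            processed.append(prompt)
            weights.append(1.0)
    return {"prompts": processed, "weights": weights}


assert parse_weighted_prompts("a photo of a cat:2|pencil sketch:0.5|dog") == {
    "prompts": ["a photo of a cat", "pencil sketch", "dog"],
    "weights": [2.0, 0.5, 1.0],
}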
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
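    # Sanity check (not part of the original file): a blinker oscillates, so
    # one step turns the vertical bar into a horizontal one.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]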
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class snake_case_( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase : Any = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = AutoModel.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase : Any = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = AutoModelForPreTraining.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[str] = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoModelForMaskedLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Any ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : str = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowerCAmelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = AutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 1_4_4_1_0 )
lowerCAmelCase : List[Any] = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 1_4_4_1_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 1_4_4_1_0 )
lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 1_4_4_1_0 )
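The pattern these (truncated) tests exercise is cross-framework checkpoint loading. A minimal sketch outside the test harness, assuming a transformers install with both torch and tensorflow available; the checkpoint name is illustrative:
from transformers import AutoModelForMaskedLM, TFAutoModelForMaskedLM
# Load PyTorch weights from a TF checkpoint and vice versa.
pt_model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_tf=True)
tf_model = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_pt=True)
# output_loading_info=True additionally returns a dict of missing/unexpected keys,
# which is why the tests above unpack two values.
pt_model, loading_info = AutoModelForMaskedLM.from_pretrained(
    "bert-base-uncased", output_loading_info=True, from_tf=True
)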
| 701
|
"""simple docstring"""
from __future__ import annotations
class snake_case_:
def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ):
lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self : Dict ):
# searches pattern in text and returns index positions
lowerCAmelCase : Union[str, Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ )
if mismatch_index == -1:
positions.append(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
lowerCAmelCase : int = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
snake_case__ : str = '''ABAABA'''
snake_case__ : List[str] = '''AB'''
snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern)
snake_case__ : Optional[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
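Note that the shift computed above (`mismatch_index - match_index`) is never used to advance the scan; the loop always moves one position at a time, so the heuristic only classifies windows. A deobfuscated sketch of the same behaviour, with readable names chosen to mirror the call sites in the class:
class BadCharacterSearch:
    # Same logic as the mangled class above, minus the unused shift.
    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.text_len, self.pat_len = len(text), len(pattern)

    def mismatch_in_text(self, pos):
        # Rightmost index in the current window that disagrees with the pattern.
        for i in range(self.pat_len - 1, -1, -1):
            if self.pattern[i] != self.text[pos + i]:
                return pos + i
        return -1

    def search(self):
        # A window matches exactly when it contains no mismatch.
        return [
            i
            for i in range(self.text_len - self.pat_len + 1)
            if self.mismatch_in_text(i) == -1
        ]

assert BadCharacterSearch("ABAABA", "AB").search() == [0, 3]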
| 637
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class snake_case_( a__ ):
__UpperCamelCase = '''realm'''
def __init__( self : Optional[int] , UpperCamelCase_ : Tuple=3_0_5_2_2 , UpperCamelCase_ : Tuple=7_6_8 , UpperCamelCase_ : List[str]=1_2_8 , UpperCamelCase_ : List[str]=1_2 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : Dict=8 , UpperCamelCase_ : Dict=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu_new" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : List[str]=5_1_2 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : List[str]=1E-12 , UpperCamelCase_ : Any=2_5_6 , UpperCamelCase_ : Optional[int]=1_0 , UpperCamelCase_ : int=1E-3 , UpperCamelCase_ : str=5 , UpperCamelCase_ : Optional[Any]=3_2_0 , UpperCamelCase_ : Any=1_3_3_5_3_7_1_8 , UpperCamelCase_ : Optional[Any]=5_0_0_0 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[Any]=2 , **UpperCamelCase_ : Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
# Common config
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Any = retriever_proj_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Dict = num_candidates
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Union[str, Any] = type_vocab_size
lowerCAmelCase : Tuple = layer_norm_eps
# Reader config
lowerCAmelCase : Any = span_hidden_size
lowerCAmelCase : Any = max_span_width
lowerCAmelCase : Tuple = reader_layer_norm_eps
lowerCAmelCase : List[Any] = reader_beam_size
lowerCAmelCase : str = reader_seq_len
# Retrieval config
lowerCAmelCase : int = num_block_records
lowerCAmelCase : Union[str, Any] = searcher_beam_size
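A minimal usage sketch, assuming this mangled class corresponds to transformers.RealmConfig and that transformers is installed:
from transformers import RealmConfig

config = RealmConfig(num_candidates=4, retriever_proj_size=128)
print(config.num_candidates, config.retriever_proj_size, config.hidden_size)
# 4 128 768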
| 702
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_( a__ ):
pass
class snake_case_:
def __init__( self : Any , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = data
lowerCAmelCase : Node | None = None
def __iter__( self : int ):
lowerCAmelCase : Any = self
lowerCAmelCase : Union[str, Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase_ )
yield node.data
lowerCAmelCase : Optional[int] = node.next_node
@property
def lowerCamelCase__ ( self : str ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
snake_case__ : Dict = Node(1)
snake_case__ : Any = Node(2)
snake_case__ : int = Node(3)
snake_case__ : Any = Node(4)
print(root_node.has_loop) # False
snake_case__ : Tuple = root_node.next_node
print(root_node.has_loop) # True
snake_case__ : List[Any] = Node(5)
snake_case__ : int = Node(6)
snake_case__ : List[Any] = Node(5)
snake_case__ : Dict = Node(6)
print(root_node.has_loop) # False
snake_case__ : Any = Node(1)
print(root_node.has_loop) # False
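The `has_loop` property above keeps an O(n) `visited` list; Floyd's tortoise-and-hare detects the same cycles in O(1) space. A standalone sketch (the helper class `_N` is hypothetical, used only to make the example self-contained):
def has_loop_floyd(head) -> bool:
    # slow advances one node per step, fast advances two;
    # they meet if and only if the list contains a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False

class _N:
    def __init__(self, data):
        self.data, self.next_node = data, None

a, b = _N(1), _N(2)
a.next_node = b
b.next_node = a  # create a cycle
assert has_loop_floyd(a) is True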
| 637
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : str = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class snake_case_( a__ ):
__UpperCamelCase = '''xlm-roberta'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any]=3_0_5_2_2 , UpperCamelCase_ : Optional[int]=7_6_8 , UpperCamelCase_ : str=1_2 , UpperCamelCase_ : int=1_2 , UpperCamelCase_ : Tuple=3_0_7_2 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Optional[int]=5_1_2 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Optional[Any]=1 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=None , **UpperCamelCase_ : Dict , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : int = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : int = position_embedding_type
lowerCAmelCase : Dict = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class snake_case_( a__ ):
@property
def lowerCamelCase__ ( self : Tuple ):
if self.task == "multiple-choice":
lowerCAmelCase : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
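A minimal usage sketch, assuming these classes correspond to transformers' XLMRobertaConfig and XLMRobertaOnnxConfig:
from transformers import XLMRobertaConfig
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig

onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])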
| 703
|
"""simple docstring"""
from torch import nn
class snake_case_( nn.Module ):
def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ):
super().__init__()
lowerCAmelCase : str = class_size
lowerCAmelCase : Dict = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCAmelCase : int = self.mlp(UpperCamelCase_ )
return logits
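The commented-out lines suggest the mangled nn.Linear arguments stand for (embed_size, class_size): a single linear layer replacing a two-layer MLP. A readable re-statement of the head under that assumption:
import torch
from torch import nn

class ClassificationHead(nn.Module):
    # Maps a hidden state of size embed_size to class_size logits.
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.mlp(hidden_state)

head = ClassificationHead(class_size=5, embed_size=768)
logits = head(torch.randn(2, 768))
assert logits.shape == (2, 5)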
| 637
| 0
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class snake_case_( a__ ):
__UpperCamelCase = False
class snake_case_( a__ ):
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 704
|
"""simple docstring"""
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Dict = val
lowerCAmelCase : str = None
lowerCAmelCase : Dict = None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ):
if self.val is not None: # a falsy value such as 0 must still be treated as a populated node
if val < self.val:
if self.left is None:
lowerCAmelCase : int = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowerCAmelCase : Any = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = val
def _snake_case ( _snake_case : Tuple , _snake_case : str ):
# Recursive traversal
if root:
inorder(root.left , _snake_case )
res.append(root.val )
inorder(root.right , _snake_case )
def _snake_case ( _snake_case : Optional[Any] ):
# Build BST
if len(_snake_case ) == 0:
return arr
lowerCAmelCase : Optional[Any] = Node(arr[0] )
for i in range(1 , len(_snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowerCAmelCase : Optional[int] = []
inorder(_snake_case , _snake_case )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
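A quick property check, using the unmangled name `tree_sort` from the `__main__` call above (the two `_snake_case` definitions are the build-and-traverse helpers). Note that already-sorted input builds a degenerate, chain-shaped BST, so insertion degrades to O(n^2) overall:
import random

data = random.sample(range(1000), 50)
assert tree_sort(data) == sorted(data)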
| 637
| 0
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
snake_case__ : int = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
snake_case__ : Dict = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
snake_case__ : Dict = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_( datasets.Metric ):
def lowerCamelCase__ ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowerCamelCase__ ( self : List[Any] ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]="uniform_average" , UpperCamelCase_ : Tuple=True ):
lowerCAmelCase : int = mean_squared_error(
UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ , multioutput=UpperCamelCase_ , squared=UpperCamelCase_ )
return {"mse": mse}
| 705
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case_( a__ ):
__UpperCamelCase = '''levit'''
def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Tuple = image_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[int] = kernel_size
lowerCAmelCase : Dict = stride
lowerCAmelCase : List[Any] = padding
lowerCAmelCase : Dict = hidden_sizes
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Tuple = depths
lowerCAmelCase : Dict = key_dim
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Tuple = attention_ratio
lowerCAmelCase : Optional[int] = mlp_ratio
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : List[str] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class snake_case_( a__ ):
__UpperCamelCase = version.parse('''1.11''' )
@property
def lowerCamelCase__ ( self : Tuple ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return 1E-4
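A minimal usage sketch, assuming this mangled class corresponds to transformers.LevitConfig; `down_ops` is the name the real config gives the Subsample list built at the end of __init__ above:
from transformers import LevitConfig

config = LevitConfig()
print(config.hidden_sizes)     # [128, 256, 384]
print(config.down_ops[0][0])   # 'Subsample'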
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : float = 0 ):
lowerCAmelCase, lowerCAmelCase : List[str] = row, column
lowerCAmelCase : List[str] = [[default_value for c in range(UpperCamelCase_ )] for r in range(UpperCamelCase_ )]
def __str__( self : str ):
lowerCAmelCase : List[str] = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase : Dict = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase : Dict = max(UpperCamelCase_ , len(str(UpperCamelCase_ ) ) )
lowerCAmelCase : Optional[int] = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCamelCase_ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase : List[str] = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCamelCase_ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ):
return str(self )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : tuple[int, int] ):
if not (isinstance(UpperCamelCase_ , (list, tuple) ) and len(UpperCamelCase_ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : str , UpperCamelCase_ : tuple[int, int] ):
assert self.validate_indicies(UpperCamelCase_ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Any , UpperCamelCase_ : tuple[int, int] , UpperCamelCase_ : float ):
assert self.validate_indicies(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = value
def __add__( self : str , UpperCamelCase_ : Matrix ):
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase : Dict = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase : Optional[int] = self[r, c] + another[r, c]
return result
def __neg__( self : str ):
lowerCAmelCase : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase : Any = -self[r, c]
return result
def __sub__( self : Dict , UpperCamelCase_ : Matrix ):
return self + (-another)
def __mul__( self : str , UpperCamelCase_ : int | float | Matrix ):
if isinstance(UpperCamelCase_ , (int, float) ): # Scalar multiplication
lowerCAmelCase : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase : Tuple = self[r, c] * another
return result
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase : int = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase : Dict = F'''Unsupported type given for another ({type(UpperCamelCase_ )})'''
raise TypeError(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase : List[str] = self[r, c]
return result
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Matrix , UpperCamelCase_ : Matrix ):
assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCAmelCase : Optional[int] = v.transpose()
lowerCAmelCase : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _snake_case ( ):
# a^(-1)
lowerCAmelCase : List[str] = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase : Union[str, Any] = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase : Dict = Matrix(3 , 1 , 0 )
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Tuple = 1, 2, -3
lowerCAmelCase : List[str] = Matrix(3 , 1 , 0 )
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_snake_case , _snake_case )}''' )
def _snake_case ( ):
import doctest
doctest.testmod()
testa()
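For reference, `sherman_morrison` implements the rank-one update identity (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), with `self` playing the role of A^(-1). A quick numpy cross-check (numpy is an assumption here; the class above does not use it):
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((3, 3)) + 3 * np.eye(3)  # well-conditioned matrix
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u).item()          # the "numerator_factor" above
sherman = a_inv - (a_inv @ u @ v.T @ a_inv) / denom

assert np.allclose(sherman, np.linalg.inv(a + u @ v.T))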
| 706
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
lowerCAmelCase : str = 3
lowerCAmelCase : Tuple = 2_5_0
lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
lowerCAmelCase : Union[str, Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Dict = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : str ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
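Outside the test harness, these criteria plug into generation as a StoppingCriteriaList. A minimal sketch, assuming transformers and torch are installed; the checkpoint name is illustrative:
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Stopping criteria cap generation at", return_tensors="pt")

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=12)])
out = model.generate(
    **inputs, stopping_criteria=criteria, pad_token_id=tokenizer.eos_token_id
)
print(tokenizer.decode(out[0]))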
| 637
| 0
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = GPTSwaTokenizer
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = False
def lowerCamelCase__ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Any = GPTSwaTokenizer(UpperCamelCase_ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = '''This is a test'''
lowerCAmelCase : int = '''This is a test'''
return input_text, output_text
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : int = '''<s>'''
lowerCAmelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCamelCase_ ) , 2_0_0_0 )
def lowerCamelCase__ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = GPTSwaTokenizer(UpperCamelCase_ )
lowerCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
lowerCAmelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
UpperCamelCase_ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
# fmt: off
self.assertListEqual(
UpperCamelCase_ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = GPTSwaTokenizer(UpperCamelCase_ )
lowerCAmelCase : int = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
lowerCAmelCase : str = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertListEqual(tokenizer.encode_fast(UpperCamelCase_ ) , UpperCamelCase_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.decode_fast(UpperCamelCase_ ) , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Optional[int] = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
lowerCAmelCase : List[Any] = {'''input_ids''': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=UpperCamelCase_ , )
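The `<0xC3>`/`<0xA9>` pieces asserted above are SentencePiece byte fallback: "é" has no subword in the fixture vocabulary, so it is emitted as its two raw UTF-8 bytes. A one-line check:
assert "é".encode("utf-8") == b"\xc3\xa9"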
| 707
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = None
def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase : List[Any] = []
for i in range(_snake_case ):
lowerCAmelCase : int = i / num_diffusion_timesteps
lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
class snake_case_( a__ , a__ ):
@register_to_config
def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ )
lowerCAmelCase : str = 1.0 - self.betas
lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
lowerCAmelCase : Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowerCAmelCase : Any = 1.0
# setable values
lowerCAmelCase : Any = None
lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() )
lowerCAmelCase : List[str] = variance_type
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ):
return sample
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ):
lowerCAmelCase : Any = num_inference_steps
lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ):
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : int = self.alphas_cumprod[t]
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : Tuple = self.betas[t]
else:
lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) )
lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase : Optional[Any] = variance.log()
lowerCAmelCase : Union[str, Any] = beta.log()
lowerCAmelCase : Dict = (predicted_variance + 1) / 2
lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase : Optional[int] = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : int = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : List[Any] = self.betas[t]
lowerCAmelCase : Optional[int] = self.alphas[t]
else:
lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : Tuple = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Dict = torch.clamp(
UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase : int = 0
if t > 0:
lowerCAmelCase : Union[str, Any] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device )
lowerCAmelCase : Any = self._get_variance(
UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase : str = variance
elif self.variance_type == "learned_range":
lowerCAmelCase : Optional[Any] = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
lowerCAmelCase : List[Any] = variance * variance_noise
lowerCAmelCase : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
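# Illustrative driver sketch (an assumption about usage, not from the source): a
# pipeline would call set_timesteps once and then iterate step() from pure noise:
#
#     scheduler.set_timesteps(25, device="cuda")
#     sample = torch.randn(shape, device="cuda")
#     for t in scheduler.timesteps:
#         noise_pred = model(sample, t)  # hypothetical denoising model
#         sample = scheduler.step(noise_pred, t, sample).prev_sample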
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase : int = timesteps.to(original_samples.device )
lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase : str = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
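# Reference sketch (standalone and illustrative, mirroring add_noise above): the
# forward process has the closed form x_t = sqrt(alpha_bar_t) * x_0
# + sqrt(1 - alpha_bar_t) * eps, broadcast per timestep.
import torch


def add_noise_reference(original_samples, noise, timesteps, alphas_cumprod):
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_one_minus = (1 - alphas_cumprod[timesteps]) ** 0.5
    # unsqueeze the per-timestep scalars until they broadcast over sample dims
    while sqrt_alpha_prod.dim() < original_samples.dim():
        sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus = sqrt_one_minus.unsqueeze(-1)
    return sqrt_alpha_prod * original_samples + sqrt_one_minus * noise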
"""simple docstring"""
import sys
from pathlib import Path
snake_case__ : str = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
snake_case__ : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
snake_case__ : str = '''zero2'''
snake_case__ : Optional[Any] = '''zero3'''
snake_case__ : Optional[int] = [ZEROa, ZEROa]
def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : str ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
lowerCAmelCase : Union[str, Any] = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
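# Illustrative note (an assumption, not from the source): for param args
# ("zero2", "base") the function above yields sub-test names of the form
# "<test_func_name>_zero2_base", so both the ZeRO stage and the model variant
# stay visible in the test output.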
# Cartesian-product of zero stages with models to test
snake_case__ : str = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case_( a__ ):
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Tuple = models[model]
lowerCAmelCase : Any = self.run_trainer(
stage=UpperCamelCase_ , model_name=UpperCamelCase_ , eval_steps=UpperCamelCase_ , num_train_epochs=1 , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
self.do_checks(UpperCamelCase_ )
return output_dir
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Any = self.get_auto_remove_tmp_dir('''./xxx''' , after=UpperCamelCase_ )
lowerCAmelCase : Tuple = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCamelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
lowerCAmelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
lowerCAmelCase : Dict = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
lowerCAmelCase : Optional[int] = self.get_launcher(UpperCamelCase_ )
lowerCAmelCase : Tuple = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
return output_dir
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[int]=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
lowerCAmelCase : Union[str, Any] = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
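# Illustrative note (derived from run_trainer above): the launcher, script and
# argument lists compose into a command along the lines of
#     deepspeed --num_nodes 1 --num_gpus 2 .../research_projects/wav2vec2/run_asr.py \
#         --model_name_or_path ... --deepspeed .../ds_config_wav2vec2_zero2.json
# which execute_subprocess_async then runs as a subprocess with the test env.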
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case_:
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[int] = num_choices
lowerCAmelCase : Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : int = None
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
lowerCAmelCase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Dict = outputs.past_key_values
# create hypothetical multiple next tokens and extend them into next_input_ids
lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and the attention mask
lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
lowerCAmelCase : str = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
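# Note: this test checks the KV-cache invariant: feeding the whole sequence in
# one forward pass and feeding only the new tokens with past_key_values must
# produce numerically matching hidden states (within 1e-3) for those tokens.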
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = LlamaModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase : str = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[str] = 3
lowerCAmelCase : List[str] = input_dict['''input_ids''']
lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : int = '''single_label_classification'''
lowerCAmelCase : Tuple = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : Dict = '''multi_label_classification'''
lowerCAmelCase : Union[str, Any] = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0}
lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
@require_torch
class snake_case_( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Model is currently gated''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase : int = '''Simply put, the theory of relativity states that '''
lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
snake_case__ : Optional[int] = TypeVar('''T''')
class snake_case_( Generic[T] ):
def __init__( self : Optional[Any] , UpperCamelCase_ : T ):
lowerCAmelCase : str = data
lowerCAmelCase : Node[T] | None = None
def __str__( self : List[Any] ):
return F'''{self.data}'''
class snake_case_( Generic[T] ):
def __init__( self : List[Any] ):
lowerCAmelCase : Node[T] | None = None
def __iter__( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.top
while node:
yield node.data
lowerCAmelCase : Dict = node.next
def __str__( self : Optional[int] ):
return "->".join([str(UpperCamelCase_ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def lowerCamelCase__ ( self : int ):
return self.top is None
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : T ):
lowerCAmelCase : List[Any] = Node(UpperCamelCase_ )
if not self.is_empty():
lowerCAmelCase : str = self.top
lowerCAmelCase : Tuple = node
def lowerCamelCase__ ( self : Any ):
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.top
lowerCAmelCase : List[str] = self.top.next
return pop_node.data
def lowerCamelCase__ ( self : Optional[int] ):
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
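# Illustrative standalone sketch (not part of the original file): the same LIFO
# behaviour as the linked-list stack above, with nodes as (data, next) tuples.
class MiniStack:
    def __init__(self):
        self._top = None  # each node is a (data, next_node) pair

    def push(self, data):
        self._top = (data, self._top)

    def pop(self):
        if self._top is None:
            raise IndexError("pop from empty stack")
        data, self._top = self._top
        return data

    def peek(self):
        if self._top is None:
            raise IndexError("peek from empty stack")
        return self._top[0]


_s = MiniStack()
_s.push(1)
_s.push(2)
assert _s.pop() == 2 and _s.peek() == 1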
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ):
lowerCAmelCase : Dict = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ):
lowerCAmelCase : Optional[int] = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' )
torch.save(scheduler.state_dict() , _snake_case )
lowerCAmelCase : List[Any] = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
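# Reference note (standard torch API, illustrating the round trip used above):
#
#     torch.save(scheduler.state_dict(), path)
#     scheduler.load_state_dict(torch.load(path))
#
# Saving mid-run and reloading should leave the remaining LR trajectory unchanged.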
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : List[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : Optional[int] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Any = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , )
for _ in range(1_0_0_0 ):
lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class snake_case_( unittest.TestCase ):
__UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
__UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
__UpperCamelCase = 10
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
lowerCAmelCase : Optional[Any] = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data
lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps )
self.assertListAlmostEqual(
UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule
lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' )
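# Worked example (derived from the table above): get_linear_schedule_with_warmup
# with num_warmup_steps=2, num_training_steps=10 and base lr 10.0 ramps
# 0.0 -> 5.0 -> 10.0 over warmup, then decays by 10.0 / 8 = 1.25 per step down
# to 1.25 at step 9, matching [0.0, 5.0, 10.0, 8.75, ..., 1.25].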
class snake_case_:
def __init__( self : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Tuple = fn
def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ):
return self.fn(*UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ):
# wrap each lr_lambda in place so the schedule stays picklable across save/reload
UpperCamelCase_.lr_lambdas = list(map(self , UpperCamelCase_.lr_lambdas ) )
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _snake_case ( _snake_case : Dict ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _snake_case ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCAmelCase : str = [1, 2, 3]
with pytest.raises(_snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(_snake_case , _snake_case , num_proc=2 )
with pytest.raises(_snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(_snake_case , _snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Any = [1, 2]
lowerCAmelCase : List[str] = {'''a''': 1, '''b''': 2}
lowerCAmelCase : int = {'''a''': [1, 2], '''b''': [3, 4]}
lowerCAmelCase : str = {'''a''': {'''1''': 1}, '''b''': 2}
lowerCAmelCase : List[str] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowerCAmelCase : Optional[Any] = [2, 3]
lowerCAmelCase : Optional[Any] = {'''a''': 2, '''b''': 3}
lowerCAmelCase : Optional[int] = {'''a''': [2, 3], '''b''': [4, 5]}
lowerCAmelCase : str = {'''a''': {'''1''': 2}, '''b''': 3}
lowerCAmelCase : Any = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
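# Illustrative recap (grounded in the expected values above): map_nested applies
# the function to every leaf of nested lists/dicts, e.g.
#     map_nested(lambda i: i + 1, {"a": [1, 2], "b": [3, 4]}) -> {"a": [2, 3], "b": [4, 5]}
# optionally distributing the leaves over num_proc workers via the active backend.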
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
__UpperCamelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase = '''summarizer'''
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = ['''text''']
__UpperCamelCase = ['''text''']
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
return self.model.generate(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
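# Minimal usage sketch (assumes the standard transformers Tool calling
# convention, where an instantiated tool is called directly on its inputs; the
# class name here is illustrative):
#
#     tool = TextSummarizationTool()
#     summary = tool("Long English text to condense ...")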
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
snake_case__ : List[Any] = ['''gpt2''']
snake_case__ : Optional[int] = '''gpt2'''
if is_tf_available():
class snake_case_( tf.Module ):
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ):
super().__init__()
lowerCAmelCase : Any = tokenizer
lowerCAmelCase : Any = AutoConfig.from_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TFGPTaLMHeadModel.from_config(UpperCamelCase_ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Dict = self.tokenizer(UpperCamelCase_ )
lowerCAmelCase : List[str] = tokenized['''input_ids'''].to_tensor()
lowerCAmelCase : Union[str, Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCAmelCase : Any = self.model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ):
super().setUp()
lowerCAmelCase : Optional[Any] = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
lowerCAmelCase : Tuple = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCAmelCase : Any = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCAmelCase : List[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCamelCase__ ( self : Optional[Any] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowerCAmelCase : Tuple = tokenizer([test_inputs] , return_tensors='''tf''' )
lowerCAmelCase : Optional[int] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCAmelCase : Optional[Any] = python_outputs[key].numpy()
lowerCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase_ , tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCamelCase__ ( self : int ):
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase : Union[str, Any] = tf.function(UpperCamelCase_ )
for test_inputs in self.test_sentences:
lowerCAmelCase : Union[str, Any] = tf.constant(UpperCamelCase_ )
lowerCAmelCase : str = compiled_tokenizer(UpperCamelCase_ )
lowerCAmelCase : Any = tf_tokenizer(UpperCamelCase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase : Any = ModelToSave(tokenizer=UpperCamelCase_ )
lowerCAmelCase : int = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase : str = model.serving(UpperCamelCase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase : Dict = Path(UpperCamelCase_ ) / '''saved.model'''
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': model.serving} )
lowerCAmelCase : List[str] = tf.saved_model.load(UpperCamelCase_ )
lowerCAmelCase : Tuple = loaded_model.signatures['''serving_default'''](UpperCamelCase_ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCamelCase__ ( self : Any ):
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase : Dict = tf_tokenizer(UpperCamelCase_ ) # Build model with some sample inputs
lowerCAmelCase : Optional[int] = tf_tokenizer.get_config()
lowerCAmelCase : int = TFGPTaTokenizer.from_config(UpperCamelCase_ )
lowerCAmelCase : List[str] = model_from_config(UpperCamelCase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCAmelCase : str = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
lowerCAmelCase : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase : Any = tf_tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ )
lowerCAmelCase : Dict = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
"""simple docstring"""
snake_case__ : List[Any] = '''Tobias Carryer'''
from time import time
class snake_case_:
def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008
lowerCAmelCase : str = multiplier
lowerCAmelCase : Optional[int] = increment
lowerCAmelCase : Optional[Any] = modulo
lowerCAmelCase : Optional[Any] = seed
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
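# Worked example (small illustrative constants, not the ones used below): with
# multiplier=3, increment=5, modulo=16 and seed=7, successive calls yield
# (3 * 7 + 5) % 16 = 10, then (3 * 10 + 5) % 16 = 3, then 14, ..., each call
# advancing the state by seed <- (multiplier * seed + increment) mod modulo.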
if __name__ == "__main__":
# Show the LCG in action.
snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Any = {'''vocab_file''': '''spiece.model'''}
snake_case__ : Union[str, Any] = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
snake_case__ : Union[str, Any] = {
'''AI-Sweden/gpt-sw3-126m''': 2_048,
'''AI-Sweden/gpt-sw3-350m''': 2_048,
'''AI-Sweden/gpt-sw3-1.6b''': 2_048,
'''AI-Sweden/gpt-sw3-6.7b''': 2_048,
'''AI-Sweden/gpt-sw3-20b''': 2_048,
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : str , ):
lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase : Any = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' if you are testing the model, this can safely be ignored''' )
lowerCAmelCase : Optional[Any] = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase : Dict = '''<|endoftext|>''' if eos_token is None else eos_token
lowerCAmelCase : int = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase : Optional[Any] = unk_token if pad_token is None else pad_token
lowerCAmelCase : Union[str, Any] = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase : int = '''<pad>''' if pad_token is None else pad_token
lowerCAmelCase : Tuple = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowerCAmelCase : str = do_lower_case
lowerCAmelCase : Union[str, Any] = remove_space
lowerCAmelCase : Dict = keep_accents
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
# Used for whitespace normalization in input texts
# fmt: off
lowerCAmelCase : int = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase : List[Any] = re.compile(
F'''[{"".join(map(UpperCamelCase_ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]''' )
def __getstate__( self : Dict ):
lowerCAmelCase : Tuple = self.__dict__.copy()
lowerCAmelCase : Optional[Any] = None
return state
def __setstate__( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase : Union[str, Any] = {}
lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase__ ( self : Any ):
return len(self.sp_model )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Any = self.non_printing_characters_re.sub('''''' , UpperCamelCase_ )
# Normalize whitespaces
lowerCAmelCase : str = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowerCAmelCase : Optional[Any] = unicodedata.normalize('''NFC''' , UpperCamelCase_ )
return text
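# Illustrative example: the method strips non-printing control characters, folds
# the exotic unicode spaces above into a plain " ", then applies NFC so that e.g.
# "e" followed by U+0301 (combining acute) becomes the single codepoint "é"
# before the sentencepiece model ever sees the text.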
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , **UpperCamelCase_ : int ):
lowerCAmelCase : int = self.preprocess_text(UpperCamelCase_ )
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str ):
return self.sp_model.PieceToId(UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : int ):
return self.sp_model.IdToPiece(UpperCamelCase_ )
@staticmethod
def lowerCamelCase__ ( UpperCamelCase_ : str ):
return out_string
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Tuple = []
lowerCAmelCase : Optional[int] = ''''''
lowerCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
lowerCAmelCase : int = True
lowerCAmelCase : Dict = []
else:
current_sub_tokens.append(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Dict = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : Union[str, bool] = False ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Any = self.preprocess_text(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = self.sp_model.encode(UpperCamelCase_ )
else:
lowerCAmelCase : Any = [self.preprocess_text(t ) for t in text]
lowerCAmelCase : Union[str, Any] = self.sp_model.encode(UpperCamelCase_ )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase : List[Any] = torch.tensor(UpperCamelCase_ )
return token_ids
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Union[int, List[int]] ):
return self.sp_model.decode(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : "Conversation" ):
lowerCAmelCase : Dict = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
lowerCAmelCase : List[str] = (
F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(UpperCamelCase_ ) + F'''{self.bos_token}Bot:'''
)
return self.encode(text=UpperCamelCase_ )
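# Illustrative sketch (an assumption about the resulting string): for a
# conversation with turns ["Hi", "Hello!"] the prompt built above reads
#     "<|endoftext|><s>User: Hi<s>Bot: Hello!<s>Bot:"
# i.e. eos + bos-separated turns, ending with "Bot:" to cue the model's reply.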
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Any = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : int = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
snake_case__ : Optional[Any] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BigBirdTokenizer
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = []
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
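# --- Illustrative sketch (not part of the original file; ids 101/102 are
# placeholder [CLS]/[SEP] ids, not BigBird's real vocabulary ids) ---
# The three methods above implement the standard [CLS] A [SEP] and
# [CLS] A [SEP] B [SEP] layouts. A self-contained sketch of the resulting
# special-tokens mask and token-type ids for a toy sentence pair:
def _sketch_pair_layout(ids_a, ids_b, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    special_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, special_mask, token_type_ids
# _sketch_pair_layout([7, 8], [9])
# -> ([101, 7, 8, 102, 9, 102], [1, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 1])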
| 637
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_( a__ ):
__UpperCamelCase = ['''image_processor''', '''tokenizer''']
__UpperCamelCase = '''Pix2StructImageProcessor'''
__UpperCamelCase = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ):
lowerCAmelCase : str = False
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self : Tuple , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = 2_0_4_8 , UpperCamelCase_ : int = 0 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : str , ):
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowerCAmelCase : str = self.tokenizer
lowerCAmelCase : Optional[int] = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowerCAmelCase : Any = self.image_processor(
UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_patches=UpperCamelCase_ , **UpperCamelCase_ )
else:
# add pixel_values and bbox
lowerCAmelCase : Tuple = self.image_processor(
UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None and not self.image_processor.is_vqa:
lowerCAmelCase : Optional[int] = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
if "attention_mask" in text_encoding:
lowerCAmelCase : Any = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
lowerCAmelCase : Tuple = text_encoding.pop('''input_ids''' )
else:
lowerCAmelCase : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase_ )
return encoding_image_processor
def lowerCamelCase__ ( self : str , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[Any] ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple , *UpperCamelCase_ : int , **UpperCamelCase_ : Dict ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = self.tokenizer.model_input_names
lowerCAmelCase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
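# --- Illustrative usage sketch (not part of the original file; the checkpoint
# name is an example) ---
# The processor above routes text-only inputs to the tokenizer, images to the
# image processor, and (for non-VQA checkpoints) renames the text encoding to
# decoder inputs:
#
# from transformers import Pix2StructProcessor
# from PIL import Image
#
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
# inputs = processor(images=Image.open("chart.png"), text="Describe the chart",
#                    return_tensors="pt")
# # -> flattened image patches plus decoder_input_ids / decoder_attention_mask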
| 713
|
"""simple docstring"""
# Use depth-first search (DFS) to find an Eulerian path or circuit traversal
def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ):
lowerCAmelCase : Any = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True
lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[Any] = -1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCAmelCase : Optional[Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ):
lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
lowerCAmelCase : Dict = 1
if check == 2:
lowerCAmelCase : int = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def _snake_case ( ):
lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCAmelCase : Any = {
1: [],
2: []
# all degrees are zero
}
lowerCAmelCase : List[str] = 10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
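# --- Illustrative sketch (not part of the original file) ---
# The classification above follows the classic degree argument: a connected
# graph has an Euler circuit iff every vertex has even degree, and an Euler
# path iff exactly two vertices have odd degree. A standalone check:
def _sketch_classify_euler(graph):
    odd = [v for v, nbrs in graph.items() if len(nbrs) % 2 == 1]
    if not odd:
        return "circuit"
    return "path" if len(odd) == 2 else "neither"
# _sketch_classify_euler({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]})
# -> 'path'  (vertices 1 and 5 have odd degree)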
| 637
| 0
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _snake_case ( _snake_case : Features ):
lowerCAmelCase : int = np.inf
def set_batch_size(_snake_case : FeatureType ) -> None:
nonlocal batch_size
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase : int = min(_snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_snake_case , _snake_case ):
lowerCAmelCase : str = min(_snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_snake_case , _snake_case ) and feature.dtype == "binary":
lowerCAmelCase : Dict = min(_snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_snake_case , _snake_case )
return None if batch_size is np.inf else batch_size
class snake_case_( a__ ):
def __init__( self : int , UpperCamelCase_ : NestedDataStructureLike[PathLike] , UpperCamelCase_ : Optional[NamedSplit] = None , UpperCamelCase_ : Optional[Features] = None , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : List[str] , ):
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
lowerCAmelCase : Optional[Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
lowerCAmelCase : Optional[Any] = Parquet(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , hash=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCamelCase__ ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
lowerCAmelCase : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : str = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
lowerCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class snake_case_:
def __init__( self : Optional[int] , UpperCamelCase_ : Dataset , UpperCamelCase_ : Union[PathLike, BinaryIO] , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : Union[str, Any] , ):
lowerCAmelCase : Optional[int] = dataset
lowerCAmelCase : str = path_or_buf
lowerCAmelCase : Optional[int] = batch_size or get_writer_batch_size(dataset.features )
lowerCAmelCase : Optional[Any] = parquet_writer_kwargs
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
lowerCAmelCase : Any = self._write(file_obj=UpperCamelCase_ , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
else:
lowerCAmelCase : List[str] = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
return written
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : BinaryIO , UpperCamelCase_ : int , **UpperCamelCase_ : Dict ):
lowerCAmelCase : str = 0
lowerCAmelCase : int = parquet_writer_kwargs.pop('''path_or_buf''' , UpperCamelCase_ )
lowerCAmelCase : List[str] = self.dataset.features.arrow_schema
lowerCAmelCase : Tuple = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ , **UpperCamelCase_ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase_ ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
lowerCAmelCase : Union[str, Any] = query_table(
table=self.dataset._data , key=slice(UpperCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase_ )
written += batch.nbytes
writer.close()
return written
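# --- Illustrative usage sketch (not part of the original file; file names are
# examples, and ParquetDatasetReader/Writer are the upstream class names) ---
# This reader/writer pair backs Dataset.from_parquet / Dataset.to_parquet in
# the `datasets` library:
#
# from datasets import Dataset
#
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# ds.to_parquet("data.parquet")               # writer path above
# ds2 = Dataset.from_parquet("data.parquet")  # reader path above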
| 714
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast
# converter for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class snake_case_( a__ ):
__UpperCamelCase = False
class snake_case_( a__ ):
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 637
| 0
|
"""simple docstring"""
snake_case__ : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
snake_case__ : Dict = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
snake_case__ : Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case__ : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
snake_case__ : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
snake_case__ : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
snake_case__ : List[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
snake_case__ : Optional[int] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
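# --- Illustrative sketch (not part of the original file) ---
# Each list above appears to be a strictly decreasing diffusion timestep
# schedule ending at 0 (e.g. reference outputs for a scheduler's
# set_timesteps). A minimal validity check for such a schedule:
def _sketch_is_valid_schedule(schedule):
    return schedule[-1] == 0 and all(a > b for a, b in zip(schedule, schedule[1:]))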
| 715
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case__ : List[Any] = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _snake_case ( ):
lowerCAmelCase : Optional[int] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCAmelCase : str = bs[:]
lowerCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
lowerCAmelCase : int = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case , _snake_case ) )
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Optional[Any] = char
return pairs
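# --- Illustrative note (not part of the original file) ---
# get_pairs (the function above) yields the adjacent symbol pairs considered
# for the next BPE merge, e.g. ("l", "o", "w") -> {("l", "o"), ("o", "w")}.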
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : Any = json.load(UpperCamelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase : List[Any] = bytes_to_unicode()
lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase : List[str] = tuple(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase, lowerCAmelCase : Any = bigram
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = 0
while i < len(UpperCamelCase_ ):
try:
lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Tuple = tuple(UpperCamelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ )
lowerCAmelCase : List[str] = word
return word
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Dict = []
for token in re.findall(self.pat , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ):
return self.decoder.get(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
lowerCAmelCase : Optional[int] = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase : Tuple = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
lowerCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
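# --- Illustrative sketch (not part of the original file) ---
# The _pad override above extends global_attention_mask with -1 rather than 0,
# because 0 already means "local attention" (not "do not attend"). In pure
# Python, the padding rule is:
def _sketch_pad_global_mask(mask, target_len, padding_side="right"):
    diff = target_len - len(mask)
    return mask + [-1] * diff if padding_side == "right" else [-1] * diff + mask
# _sketch_pad_global_mask([1, 0, 0], 5) -> [1, 0, 0, -1, -1]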
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : list[float] , _snake_case : int ) -> None:
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(_snake_case ):
print(f'''{i}\t\t{d}''' )
def _snake_case ( _snake_case : list[dict[str, int]] , _snake_case : list[float] , _snake_case : int ) -> bool:
for j in range(_snake_case ):
lowerCAmelCase : Optional[int] = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _snake_case ( _snake_case : list[dict[str, int]] , _snake_case : int , _snake_case : int , _snake_case : int ) -> list[float]:
lowerCAmelCase : Optional[int] = [float('''inf''' )] * vertex_count
lowerCAmelCase : Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_snake_case ):
lowerCAmelCase : int = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
lowerCAmelCase : int = distance[u] + w
lowerCAmelCase : Optional[Any] = check_negative_cycle(_snake_case , _snake_case , _snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Dict = int(input('''Enter number of vertices: ''').strip())
snake_case__ : Any = int(input('''Enter number of edges: ''').strip())
snake_case__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
snake_case__ : Union[str, Any] = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
snake_case__ : Dict = {'''src''': src, '''dst''': dest, '''weight''': weight}
snake_case__ : str = int(input('''\nEnter shortest path source:''').strip())
snake_case__ : Tuple = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
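# --- Illustrative sketch (not part of the original file; re-implements the
# relaxation loop rather than calling the renamed helpers above) ---
# A self-contained Bellman-Ford run on a small graph in the same edge-dict
# representation:
def _sketch_bellman_ford(edges, vertex_count, src):
    dist = [float("inf")] * vertex_count
    dist[src] = 0.0
    for _ in range(vertex_count - 1):  # relax every edge V - 1 times
        for e in edges:
            u, v, w = e["src"], e["dst"], e["weight"]
            if dist[u] != float("inf") and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
    return dist
# _sketch_bellman_ford(
#     [{"src": 0, "dst": 1, "weight": 4}, {"src": 0, "dst": 2, "weight": 1},
#      {"src": 2, "dst": 1, "weight": 2}], 3, 0)
# -> [0.0, 3.0, 1.0]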
| 716
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 4000000 ):
lowerCAmelCase : int = [0, 1]
lowerCAmelCase : List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCAmelCase : int = 0
for j in range(len(_snake_case ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
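# --- Illustrative note (not part of the original file) ---
# Every third Fibonacci number is even, and the even terms satisfy
# E(k) = 4*E(k-1) + E(k-2) with E = 2, 8, 34, ... A sketch of that shortcut,
# which agrees with solution() (4613732 for n = 4_000_000):
def _sketch_even_fib_sum(n=4000000):
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total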
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c; π is
# imported from math. These appear in the Casimir formula F = (ℏ c π² A) / (240 d⁴).
snake_case__ = 1.054571817e-34 # unit of ℏ : J * s
snake_case__ = 3e8 # unit of c : m * s^-1
def _snake_case ( _snake_case : float , _snake_case : float , _snake_case : float ):
if (force, area, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if force < 0:
raise ValueError('''Magnitude of force can not be negative''' )
if distance < 0:
raise ValueError('''Distance can not be negative''' )
if area < 0:
raise ValueError('''Area can not be negative''' )
if force == 0:
lowerCAmelCase : Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowerCAmelCase : Optional[Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('''One and only one argument must be 0''' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
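# --- Illustrative usage sketch (not part of the original file) ---
# Evaluating F = (ℏ c π² A) / (240 d⁴) directly for 4 cm² plates 1 µm apart:
def _sketch_casimir_force(area=4e-4, distance=1e-6):
    return (1.054571817e-34 * 3e8 * pi**2 * area) / (240 * distance**4)
# _sketch_casimir_force() -> ~5.2e-7 N, matching the function above when
# called with force=0, area=4e-4, distance=1e-6.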
| 717
|
"""simple docstring"""
def _snake_case ( _snake_case : float , _snake_case : list[float] ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
lowerCAmelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) )
return round(_snake_case , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
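# --- Illustrative worked example (not part of the original file) ---
# NPV of a -1000 outlay followed by three 500 inflows at a 10% discount rate:
#   -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3
#   = -1000 + 454.55 + 413.22 + 375.66 ≈ 243.43
# which is what the function above returns for
# discount_rate=0.1, cash_flows=[-1000, 500, 500, 500].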
| 637
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ : Optional[int] = '''▁'''
snake_case__ : Any = {'''vocab_file''': '''spiece.model'''}
snake_case__ : int = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
snake_case__ : Any = {
'''google/pegasus-xsum''': 512,
}
snake_case__ : List[Any] = logging.get_logger(__name__)
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any]="<pad>" , UpperCamelCase_ : Any="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[Any]="<mask_2>" , UpperCamelCase_ : str="<mask_1>" , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=1_0_3 , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : int = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(UpperCamelCase_ )}, but is'''
F''' {type(UpperCamelCase_ )}''' )
lowerCAmelCase : Union[str, Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with <unk_2>, ..., <unk_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(UpperCamelCase_ ) , self.offset - 1 )
]
if len(set(UpperCamelCase_ ) ) != len(UpperCamelCase_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowerCAmelCase : Tuple = additional_special_tokens_extended
else:
lowerCAmelCase : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
lowerCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowerCAmelCase : Tuple = mask_token_sent
lowerCAmelCase : List[str] = vocab_file
lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
# add special tokens to encoder dict
lowerCAmelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, <unk_2>, ..., <unk_102>
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def lowerCamelCase__ ( self : str ):
return len(self.sp_model ) + self.offset
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
lowerCAmelCase : List[str] = self.__dict__.copy()
lowerCAmelCase : Union[str, Any] = None
return state
def __setstate__( self : Any , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase : int = {}
lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase : List[Any] = self.sp_model.piece_to_id(UpperCamelCase_ )
return sp_id + self.offset
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase : Any = self.sp_model.IdToPiece(index - self.offset )
return token
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : List[str] = []
lowerCAmelCase : int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
lowerCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(UpperCamelCase_ )
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str]=False ):
return 1
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : str = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
| 718
|
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : list[int] , _snake_case : int ):
if len(_snake_case ) == 0:
return False
lowerCAmelCase : List[Any] = len(_snake_case ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , _snake_case )
else:
return binary_search(a_list[midpoint + 1 :] , _snake_case )
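# A minimal iterative sketch (an addition, not from the original module) that
# avoids the list copies made by the slicing recursion above.
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False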
if __name__ == "__main__":
snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip()
snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')]
snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip())
snake_case__ : str = '''''' if binary_search(sequence, target) else '''not '''
print(f"""{target} was {not_str}found in {sequence}""")
| 637
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
snake_case__ : int = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
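# Torch-dependent symbols are appended to the import structure below only when
# torch is importable, keeping the package import optional-dependency-safe.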
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
snake_case__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
snake_case__ : Optional[Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ):
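    # Treat the two byte counts as equal when they differ by less than 1% of the target.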
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def _snake_case ( _snake_case : Any ):
lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case )
lowerCAmelCase : str = TestCommand(*_snake_case )
test_command.run()
lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' )
assert os.path.exists(_snake_case )
lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case )
lowerCAmelCase : List[str] = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case )
if key == "num_bytes":
assert is_apercent_close(_snake_case , _snake_case )
elif key == "splits":
assert list(_snake_case ) == list(_snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 637
| 0
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class snake_case_( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase_ : int = 1_2_8 , UpperCamelCase_ : int = 2_5_6 , UpperCamelCase_ : float = 2_0_0_0.0 , UpperCamelCase_ : int = 7_6_8 , UpperCamelCase_ : int = 1_2 , UpperCamelCase_ : int = 1_2 , UpperCamelCase_ : int = 6_4 , UpperCamelCase_ : int = 2_0_4_8 , UpperCamelCase_ : float = 0.1 , ):
super().__init__()
lowerCAmelCase : Optional[int] = nn.Sequential(
nn.Linear(UpperCamelCase_ , d_model * 4 , bias=UpperCamelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase_ ) , nn.SiLU() , )
lowerCAmelCase : Dict = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = nn.Dropout(p=UpperCamelCase_ )
lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(UpperCamelCase_ ):
# FiLM conditional T5 decoder
lowerCAmelCase : Tuple = DecoderLayer(d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ )
self.decoders.append(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = TaLayerNorm(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = nn.Dropout(p=UpperCamelCase_ )
lowerCAmelCase : int = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ):
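        # Outer product of the query and key padding masks; the final unsqueeze adds
        # a broadcastable head dimension -> shape [batch, 1, query_len, key_len].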
lowerCAmelCase : Union[str, Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : str ):
lowerCAmelCase : Tuple = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCAmelCase : Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowerCAmelCase : str = self.conditioning_emb(UpperCamelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCAmelCase : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCAmelCase : int = torch.broadcast_to(
torch.arange(UpperCamelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowerCAmelCase : Any = self.position_encoding(UpperCamelCase_ )
lowerCAmelCase : Tuple = self.continuous_inputs_projection(UpperCamelCase_ )
inputs += position_encodings
lowerCAmelCase : Tuple = self.dropout(UpperCamelCase_ )
# decoder: No padding present.
lowerCAmelCase : Optional[Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCAmelCase : int = [(x, self.encoder_decoder_mask(UpperCamelCase_ , UpperCamelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCAmelCase : Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowerCAmelCase : Optional[int] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowerCAmelCase : Optional[int] = lyr(
UpperCamelCase_ , conditioning_emb=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )[0]
lowerCAmelCase : List[str] = self.decoder_norm(UpperCamelCase_ )
lowerCAmelCase : Any = self.post_dropout(UpperCamelCase_ )
lowerCAmelCase : int = self.spec_out(UpperCamelCase_ )
return spec_out
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any=1E-6 ):
super().__init__()
lowerCAmelCase : Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , dropout_rate=UpperCamelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Any=None , ):
lowerCAmelCase : Optional[int] = self.layer[0](
UpperCamelCase_ , conditioning_emb=UpperCamelCase_ , attention_mask=UpperCamelCase_ , )
if encoder_hidden_states is not None:
lowerCAmelCase : List[str] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
lowerCAmelCase : Optional[int] = self.layer[1](
UpperCamelCase_ , key_value_states=UpperCamelCase_ , attention_mask=UpperCamelCase_ , )
# Apply Film Conditional Feed Forward layer
lowerCAmelCase : Dict = self.layer[-1](UpperCamelCase_ , UpperCamelCase_ )
return (hidden_states,)
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ):
super().__init__()
lowerCAmelCase : Union[str, Any] = TaLayerNorm(UpperCamelCase_ )
lowerCAmelCase : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = Attention(query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , out_bias=UpperCamelCase_ , scale_qk=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = nn.Dropout(UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[Any]=None , ):
# pre_self_attention_layer_norm
lowerCAmelCase : List[str] = self.layer_norm(UpperCamelCase_ )
if conditioning_emb is not None:
lowerCAmelCase : Optional[Any] = self.FiLMLayer(UpperCamelCase_ , UpperCamelCase_ )
# Self-attention block
lowerCAmelCase : int = self.attention(UpperCamelCase_ )
lowerCAmelCase : str = hidden_states + self.dropout(UpperCamelCase_ )
return hidden_states
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] ):
super().__init__()
lowerCAmelCase : str = Attention(query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , out_bias=UpperCamelCase_ , scale_qk=UpperCamelCase_ )
lowerCAmelCase : Tuple = TaLayerNorm(UpperCamelCase_ , eps=UpperCamelCase_ )
lowerCAmelCase : Tuple = nn.Dropout(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[str]=None , ):
lowerCAmelCase : Optional[Any] = self.layer_norm(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = self.attention(
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
lowerCAmelCase : Tuple = hidden_states + self.dropout(UpperCamelCase_ )
return layer_output
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ):
super().__init__()
lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase_ )
lowerCAmelCase : int = TaLayerNorm(UpperCamelCase_ , eps=UpperCamelCase_ )
lowerCAmelCase : Dict = nn.Dropout(UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=None ):
lowerCAmelCase : str = self.layer_norm(UpperCamelCase_ )
if conditioning_emb is not None:
lowerCAmelCase : Tuple = self.film(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = self.DenseReluDense(UpperCamelCase_ )
lowerCAmelCase : List[str] = hidden_states + self.dropout(UpperCamelCase_ )
return hidden_states
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ):
super().__init__()
lowerCAmelCase : Union[str, Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
lowerCAmelCase : List[str] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
lowerCAmelCase : Dict = nn.Dropout(UpperCamelCase_ )
lowerCAmelCase : Any = NewGELUActivation()
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.act(self.wi_a(UpperCamelCase_ ) )
lowerCAmelCase : Tuple = self.wi_a(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = hidden_gelu * hidden_linear
lowerCAmelCase : Union[str, Any] = self.dropout(UpperCamelCase_ )
lowerCAmelCase : Dict = self.wo(UpperCamelCase_ )
return hidden_states
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]=1E-6 ):
super().__init__()
lowerCAmelCase : Optional[int] = nn.Parameter(torch.ones(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = eps
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
lowerCAmelCase : Optional[Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase_ )
lowerCAmelCase : str = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCAmelCase : Any = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class snake_case_( nn.Module ):
'''simple docstring'''
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.Tensor ):
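        # Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).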
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase_ , 3.0 )) ))
class snake_case_( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ):
super().__init__()
lowerCAmelCase : Optional[Any] = nn.Linear(UpperCamelCase_ , out_features * 2 , bias=UpperCamelCase_ )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ):
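        # FiLM: predict a per-channel (scale, shift) pair from the conditioning
        # embedding and modulate the input as x * (1 + scale) + shift.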
lowerCAmelCase : Tuple = self.scale_bias(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = torch.chunk(UpperCamelCase_ , 2 , -1 )
lowerCAmelCase : List[str] = x * (1 + scale) + shift
return x
| 720
|
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ):
return base * power(_snake_case , (exponent - 1) ) if exponent else 1
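# Worked example: power(2, 3) -> 2 * power(2, 2) -> 2 * 2 * power(2, 1)
# -> 2 * 2 * 2 * power(2, 0) -> 2 * 2 * 2 * 1 = 8.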
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip())
snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip())
snake_case__ : Any = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
snake_case__ : Dict = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 637
| 0
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
snake_case__ : Optional[Any] = '''path-to-your-trained-model'''
snake_case__ : Any = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
snake_case__ : Optional[Any] = '''A photo of sks dog in a bucket'''
snake_case__ : Optional[int] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 721
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : int = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ):
if attention_mask is None:
lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
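    # Note: the head masks built above are not included in the returned dict.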
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class snake_case_:
def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ):
lowerCAmelCase : Tuple = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Union[str, Any] = eos_token_id
lowerCAmelCase : Dict = pad_token_id
lowerCAmelCase : Optional[Any] = bos_token_id
lowerCAmelCase : List[str] = initializer_range
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ):
lowerCAmelCase : int = 2_0
lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = 2_0
lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class snake_case_( unittest.TestCase ):
__UpperCamelCase = 99
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase : List[Any] = input_ids.shape[0]
lowerCAmelCase : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data()
lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ )
lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
lowerCAmelCase : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class snake_case_( a__ , unittest.TestCase , a__ ):
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Any = FlaxBlenderbotModelTester(self )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : List[Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCAmelCase : List[Any] = ['''Sam''']
lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.'''
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
| 637
| 0
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Tuple = '''Hello world! cécé herlolip'''
def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : bool ):
lowerCAmelCase : Any = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
lowerCAmelCase : Optional[int] = roberta.model.encoder.sentence_encoder
lowerCAmelCase : Union[str, Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowerCAmelCase : Any = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _snake_case )
lowerCAmelCase : List[str] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase : Tuple = roberta_sent_encoder.embed_tokens.weight
lowerCAmelCase : List[str] = roberta_sent_encoder.embed_positions.weight
lowerCAmelCase : List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCAmelCase : Any = roberta_sent_encoder.layer_norm.weight
lowerCAmelCase : Any = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i]
lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
lowerCAmelCase : RobertaAttention = layer.attention
lowerCAmelCase : Optional[Any] = roberta_layer.self_attn_layer_norm.weight
lowerCAmelCase : str = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCAmelCase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCAmelCase : Dict = roberta_layer.self_attn.q_proj.weight
lowerCAmelCase : Tuple = roberta_layer.self_attn.q_proj.bias
lowerCAmelCase : Dict = roberta_layer.self_attn.k_proj.weight
lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.k_proj.bias
lowerCAmelCase : Tuple = roberta_layer.self_attn.v_proj.weight
lowerCAmelCase : str = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCAmelCase : Any = roberta_layer.final_layer_norm.weight
lowerCAmelCase : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
lowerCAmelCase : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase : Any = roberta_layer.fca.weight
lowerCAmelCase : Any = roberta_layer.fca.bias
# output
lowerCAmelCase : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase : Optional[int] = roberta_layer.fca.weight
lowerCAmelCase : List[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
lowerCAmelCase : int = roberta.model.classification_heads['''mnli'''].dense.weight
lowerCAmelCase : Optional[int] = roberta.model.classification_heads['''mnli'''].dense.bias
lowerCAmelCase : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.weight
lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCAmelCase : Tuple = roberta.model.encoder.lm_head.dense.weight
lowerCAmelCase : List[str] = roberta.model.encoder.lm_head.dense.bias
lowerCAmelCase : str = roberta.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase : str = roberta.model.encoder.lm_head.weight
lowerCAmelCase : Dict = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
lowerCAmelCase : Tuple = model(_snake_case )[0]
if classification_head:
lowerCAmelCase : int = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_snake_case ) )
else:
lowerCAmelCase : Any = roberta.model(_snake_case )[0]
print(our_output.shape , their_output.shape )
lowerCAmelCase : int = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowerCAmelCase : Dict = torch.allclose(_snake_case , _snake_case , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case , exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
snake_case__ : int = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 700
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ : int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _snake_case ( _snake_case : list[list[int]] ):
lowerCAmelCase : Union[str, Any] = []
for i in range(len(_snake_case ) ):
lowerCAmelCase : Any = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCAmelCase : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_snake_case ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_snake_case ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCAmelCase : str = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_snake_case )
return next_generation
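# Minimal sanity check (an addition, not from the original script): one step of
# the vertical blinker grid above yields its horizontal phase,
# i.e. new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]].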
def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ):
lowerCAmelCase : int = []
for _ in range(_snake_case ):
# Create output image
lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) )
lowerCAmelCase : Union[str, Any] = img.load()
# Save cells to image
for x in range(len(_snake_case ) ):
for y in range(len(cells[0] ) ):
lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255
lowerCAmelCase : List[Any] = (colour, colour, colour)
# Save image
images.append(_snake_case )
lowerCAmelCase : Union[str, Any] = new_generation(_snake_case )
return images
if __name__ == "__main__":
snake_case__ : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 637
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
snake_case__ : Dict = 250_004
snake_case__ : List[Any] = 250_020
@require_sentencepiece
@require_tokenizers
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = MBartTokenizer
__UpperCamelCase = MBartTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def lowerCamelCase__ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Tuple = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : int = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowerCamelCase__ ( self : int ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : str = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase : Any = tempfile.mkdtemp()
lowerCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : str = tokenizer_r.from_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_( unittest.TestCase ):
__UpperCamelCase = '''facebook/mbart-large-en-ro'''
__UpperCamelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__UpperCamelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__UpperCamelCase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def lowerCamelCase__ ( cls : Optional[Any] ):
lowerCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowerCAmelCase : List[Any] = 1
return cls
def lowerCamelCase__ ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowerCAmelCase : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
lowerCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowerCAmelCase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , UpperCamelCase_ )
lowerCAmelCase : Dict = 1_0
lowerCAmelCase : List[str] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = MBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : str = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 701
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Rightmost index of `char` inside the pattern, or -1 if absent.
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Position in the text of the rightmost mismatch for the window
        # starting at `current_pos`, or -1 if the window fully matches.
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
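# Quick check (an illustrative addition, not part of the original file):
# scanning "ABAABA" for "AB" matches windows starting at indices 0 and 3.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]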
| 637
| 0
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
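    # Note (illustrative, not part of the original file): XLM-R adds
    # fairseq_offset == 1 to every SentencePiece id so that ids 0-3 stay
    # reserved for fairseq's <s>, <pad>, </s> and <unk>; the "unk: 2 + 1 = 3"
    # comment above refers to SentencePiece's unk id 2 being shifted to 3.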
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase : Tuple = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase__ ( self : Any ):
# fmt: off
lowerCAmelCase : List[str] = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 702
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node = Node(6)
    root_node.next_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
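# An alternative sketch (illustrative addition, not part of the original file):
# Floyd's tortoise-and-hare detects the same loops in O(1) extra space,
# instead of the O(n) `visited` list used by `has_loop` above.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:                 # pointers meet only inside a cycle
            return True
    return False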
| 637
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
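# Illustrative usage (a sketch, not part of the original module): head_dim and
# rotary are derived from the fields above.
if __name__ == "__main__":
    config = FalconConfig()
    assert config.head_dim == 4544 // 71  # hidden_size // num_attention_heads == 64
    assert config.rotary  # rotary embeddings are used whenever alibi is False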
| 703
|
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head mapping pooled hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
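# Minimal usage sketch (illustrative addition; shapes only):
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # a batch of two pooled hidden states
    assert head(hidden_state).shape == (2, 5)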
| 637
| 0
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
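    # Example invocation (illustrative only; paths are placeholders, not from
    # the original script):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
    #     --original_config_file ./v1-inference.yaml \
    #     --scheduler_type pndm \
    #     --extract_ema \
    #     --dump_path ./stable-diffusion-v1-5-diffusers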
| 704
|
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
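    # Quick check (illustrative addition, not part of the original file): the
    # in-order walk of the BST emits ascending order, so the call above prints
    # [1, 2, 3, 9, 10, 13, 14].
    assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]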
| 637
| 0
|
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
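    # Quick check (illustrative addition, not part of the original file):
    # characters are taken alternately from each string, so "AB" and "XYZ"
    # interleave to "AXBYZ".
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"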
| 705
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
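# Illustrative usage (a sketch, not part of the original module): the two
# "Subsample" stages in down_ops are derived from key_dim and hidden_sizes.
if __name__ == "__main__":
    config = LevitConfig()
    assert config.down_ops[0] == ["Subsample", 16, 128 // 16, 4, 2, 2]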
| 637
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
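    # Note (illustrative, not part of the original file): "\u0120" is the
    # byte-level BPE marker for a leading space (rendered as "Ġ"); the toy
    # merges in setUp never merge "\u0120" with "n", so the space before
    # "newer" surfaces as its own "\u0120" token above.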
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [
                tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]
            ]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 706
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
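# Usage sketch (illustrative, not part of this test file): the same objects
# plug into generation, e.g.
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   model.generate(input_ids, stopping_criteria=criteria)
# generation then stops as soon as any criterion in the list returns True.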
| 637
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")

        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
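# Worked example (illustrative note, not part of the original script): with
# the defaults above, input_size=192 and mask_patch_size=32 give rand_size=6,
# so token_count=36 and, at a 0.6 mask ratio, mask_count=ceil(21.6)=22;
# scale=32//4=8 then expands the 6x6 mask to 48x48=2304 entries, one per
# model patch of the 192-pixel input.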
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]

        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 707
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
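# In formula form (illustrative note, not part of the original module): with
# the cosine transform alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2,
# the loop above sets beta_i = min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T), max_beta),
# i.e. the "squaredcos_cap_v2" schedule of Nichol & Dhariwal (2021).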
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ):
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : int = self.alphas_cumprod[t]
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : Tuple = self.betas[t]
else:
lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) )
lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase : Optional[Any] = variance.log()
lowerCAmelCase : Union[str, Any] = beta.log()
lowerCAmelCase : Dict = (predicted_variance + 1) / 2
lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
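# Editor's note: `frac` maps the network's variance prediction from [-1, 1] to
# [0, 1] and interpolates in log-space between the posterior lower bound
# log(beta_tilde_t) and the upper bound log(beta_t), as in Improved DDPM
# (Nichol & Dhariwal, 2021).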
return variance
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase : Optional[int] = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : int = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : List[Any] = self.betas[t]
lowerCAmelCase : Optional[int] = self.alphas[t]
else:
lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : Tuple = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Dict = torch.clamp(
UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase : int = 0
if t > 0:
lowerCAmelCase : Union[str, Any] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device )
lowerCAmelCase : Any = self._get_variance(
UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase : str = variance
elif self.variance_type == "learned_range":
lowerCAmelCase : Optional[Any] = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
lowerCAmelCase : List[Any] = variance * variance_noise
lowerCAmelCase : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase : int = timesteps.to(original_samples.device )
lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase : str = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
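# Editor's sketch (assumptions labeled, not part of the original file): `add_noise`
# above is the closed-form forward process q(x_t | x_0),
#   x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * noise.
# The standalone helper below restates that single line without the broadcasting
# bookkeeping, for one scalar timestep t.
def _forward_diffusion_sketch(x_zero: "torch.Tensor", noise: "torch.Tensor", alphas_cumprod: "torch.Tensor", t: int) -> "torch.Tensor":
    sqrt_alpha_prod = alphas_cumprod[t] ** 0.5
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[t]) ** 0.5
    return sqrt_alpha_prod * x_zero + sqrt_one_minus_alpha_prod * noise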
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : int ):
self.m_num_of_nodes = UpperCamelCase_
self.m_edges: list[list[int]] = []
self.m_component: dict[int, int] = {}
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
self.m_component[k] = self.find_component(k )  # path-compress every entry
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : list[int] , UpperCamelCase_ : int , UpperCamelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
self.m_component[u_node] = v_node  # attach the smaller component under v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase_ )
elif component_size[u_node] >= component_size[v_node]:
self.m_component[v_node] = self.find_component(u_node )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
component_size: list[int] = []
mst_weight = 0
minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
num_of_components = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
u, v, w = edge
u_component = self.m_component[u]
v_component = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
minimum_weight_edge[component] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(edge , list ):
u, v, w = edge
u_component = self.m_component[u]
v_component = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
minimum_weight_edge = [-1] * self.m_num_of_nodes  # reset for the next round
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def _snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
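# Editor's usage sketch: with the original (pre-obfuscation) method names
# add_edge / boruvka, which are an assumption here since every method above now
# shares the name `lowerCamelCase__`, the class is driven like this:
#
#   g = snake_case_(5)        # graph with 5 nodes
#   g.add_edge(0, 1, 1)
#   g.add_edge(0, 2, 3)
#   g.add_edge(1, 2, 2)
#   g.add_edge(2, 3, 4)
#   g.add_edge(3, 4, 5)
#   g.boruvka()               # prints each accepted edge and the total MST weight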
| 708
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case_:
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[int] = num_choices
lowerCAmelCase : Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : int = None
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
lowerCAmelCase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Dict = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
lowerCAmelCase : str = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = LlamaModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase : str = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[str] = 3
lowerCAmelCase : List[str] = input_dict['''input_ids''']
lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : int = '''single_label_classification'''
lowerCAmelCase : Tuple = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : Dict = '''multi_label_classification'''
lowerCAmelCase : Union[str, Any] = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0}
lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
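# Editor's note (illustration, not part of the test): RoPE scaling is switched on
# through the config dict built above, e.g.
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
# Linear scaling rescales every position, so even short inputs change; dynamic
# NTK scaling only activates past the original max_position_embeddings, which is
# exactly the distinction the assertions above verify.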
@require_torch
class snake_case_( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also this will be a `too_slow` test''' )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Model is currently gated''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase : int = '''Simply put, the theory of relativity states that '''
lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 637
| 0
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _snake_case ( _snake_case : str , _snake_case : List[Any]=7 ):
lowerCAmelCase : Any = None
if token is not None:
lowerCAmelCase : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowerCAmelCase : Union[str, Any] = '''636036'''
lowerCAmelCase : Tuple = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowerCAmelCase : int = requests.get(_snake_case , headers=_snake_case ).json()
return result["workflow_runs"]
def _snake_case ( _snake_case : Union[str, Any] ):
lowerCAmelCase : int = get_daily_ci_runs(_snake_case )
lowerCAmelCase : Dict = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowerCAmelCase : List[Any] = workflow_run['''id''']
break
return workflow_run_id
def _snake_case ( _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Any ):
lowerCAmelCase : Tuple = get_last_daily_ci_runs(_snake_case )
if workflow_run_id is not None:
lowerCAmelCase : List[Any] = get_artifacts_links(worflow_run_id=_snake_case , token=_snake_case )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowerCAmelCase : Optional[int] = artifacts_links[artifact_name]
download_artifact(
artifact_name=_snake_case , artifact_url=_snake_case , output_dir=_snake_case , token=_snake_case )
def _snake_case ( _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[Any] ):
get_last_daily_ci_artifacts(_snake_case , _snake_case , _snake_case )
lowerCAmelCase : List[Any] = {}
for artifact_name in artifact_names:
lowerCAmelCase : Union[str, Any] = os.path.join(_snake_case , f'''{artifact_name}.zip''' )
if os.path.isfile(_snake_case ):
lowerCAmelCase : Dict = {}
with zipfile.ZipFile(_snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(_snake_case ):
# read the file
with z.open(_snake_case ) as f:
lowerCAmelCase : List[Any] = f.read().decode('''UTF-8''' )
return results
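# Editor's sketch of the intended call chain. Obfuscation renamed all three
# helpers above to `_snake_case`, so the original names used below
# (get_last_daily_ci_reports, etc.) are an assumption:
#
#   token = os.environ["GITHUB_TOKEN"]
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="ci_artifacts", token=token
#   )
#   # `reports` maps each artifact name to {filename: decoded file contents}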
| 709
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ):
lowerCAmelCase : Dict = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ):
lowerCAmelCase : Optional[int] = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' )
torch.save(scheduler.state_dict() , _snake_case )
lowerCAmelCase : List[Any] = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
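# Editor's note: the helper above exercises the standard PyTorch pattern for
# checkpointing an LR schedule mid-training. In isolation (the scheduler choice
# is illustrative):
#
#   torch.save(scheduler.state_dict(), "schedule.bin")             # at a checkpoint
#   scheduler = get_linear_schedule_with_warmup(optimizer, 2, 10)  # fresh instance
#   scheduler.load_state_dict(torch.load("schedule.bin"))          # resume in place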
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : List[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : Optional[int] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Any = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , )
for _ in range(1_0_0_0 ):
lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class snake_case_( unittest.TestCase ):
__UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
__UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
__UpperCamelCase = 10
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
lowerCAmelCase : Optional[Any] = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data
lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps )
self.assertListAlmostEqual(
UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule
lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' )
class snake_case_:
def __init__( self : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Tuple = fn
def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ):
return self.fn(*UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ):
scheduler.lr_lambdas = list(map(self , scheduler.lr_lambdas ) )  # re-attach so pickling exercises the wrapped lambdas
| 637
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case__ : Optional[Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = ['''MobileViTFeatureExtractor''']
snake_case__ : Optional[Any] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 710
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
__UpperCamelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase = '''summarizer'''
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = ['''text''']
__UpperCamelCase = ['''text''']
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
return self.model.generate(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
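# Editor's usage sketch (hedged): a PipelineTool is instantiated and called
# directly; __call__ chains the three hooks defined above
# (tokenize, generate, decode). The tool's original class name is an assumption:
#
#   tool = TextSummarizationTool()
#   summary = tool("A very long English article ...")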
| 637
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = 1_0
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = [1, 2, 3, 4]
lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowerCAmelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowerCAmelCase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
lowerCAmelCase : Union[str, Any] = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = ''''''
lowerCAmelCase : str = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
lowerCAmelCase : Union[str, Any] = process_story(UpperCamelCase_ )
lowerCAmelCase : Dict = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Dict = torch.tensor([1, 2, 3, 4] )
lowerCAmelCase : str = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowerCAmelCase : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 2_3 ).numpy() , expected.numpy() )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Any = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowerCAmelCase : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = 1_0_1
lowerCAmelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowerCAmelCase : str = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowerCAmelCase : Dict = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
| 711
|
"""simple docstring"""
snake_case__ : List[Any] = '''Tobias Carryer'''
from time import time
class snake_case_:
def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008
lowerCAmelCase : str = multiplier
lowerCAmelCase : Optional[int] = increment
lowerCAmelCase : Optional[Any] = modulo
lowerCAmelCase : Optional[Any] = seed
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
snake_case__ : int = snake_case_(1_664_525, 1_013_904_223, 2 << 31)  # the class defined above; its original name was LinearCongruentialGenerator
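# Multiplier 1_664_525 and increment 1_013_904_223 are the "Numerical Recipes"
# LCG parameters; the modulus 2 << 31 == 2**32 gives the full-period recurrence
# X_{n+1} = (a * X_n + c) mod 2**32.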
while True:
print(lcg.next_number())
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : int=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[str]=3_7 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : List[str]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : str = seq_length
lowerCAmelCase : Any = is_training
lowerCAmelCase : Tuple = use_input_mask
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Any = projection_dim
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : str = intermediate_size
lowerCAmelCase : List[str] = dropout
lowerCAmelCase : int = attention_dropout
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : List[str] = scope
lowerCAmelCase : List[str] = bos_token_id
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : int = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCAmelCase : Optional[int] = input_mask.numpy()
lowerCAmelCase : str = input_mask.shape
lowerCAmelCase : List[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : List[Any] = TFBlipTextModel(config=UpperCamelCase_ )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , training=UpperCamelCase_ )
lowerCAmelCase : Any = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = config_and_inputs
lowerCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = (TFBlipTextModel,) if is_tf_available() else ()
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[str] = BlipTextModelTester(self )
lowerCAmelCase : int = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : int ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def lowerCamelCase__ ( self : Tuple ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def lowerCamelCase__ ( self : List[Any] ):
pass
@slow
def lowerCamelCase__ ( self : Tuple ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : str = TFBlipTextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase_ )
| 712
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Any = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : int = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
snake_case__ : Optional[Any] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BigBirdTokenizer
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = []
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
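# Editor's note on the token layouts built above (BigBird's documented format,
# shown for a single sequence A and a pair A, B):
#   input ids:            [CLS] A [SEP]      and   [CLS] A [SEP] B [SEP]
#   token type ids:       all 0              and   0s over [CLS] A [SEP], 1s over B [SEP]
#   special tokens mask:  1 at [CLS]/[SEP] positions, 0 over ordinary tokens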
| 637
| 0
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class snake_case_( a__ ):
def __init__( self : str , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Any ):
lowerCAmelCase, lowerCAmelCase : Tuple = {}, {}
if padding is not None:
lowerCAmelCase : Optional[int] = padding
if truncation is not None:
lowerCAmelCase : Optional[Any] = truncation
if top_k is not None:
lowerCAmelCase : Union[str, Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , UpperCamelCase_ : Union["Image.Image", str] , UpperCamelCase_ : str = None , **UpperCamelCase_ : Any ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = {'''image''': image, '''question''': question}
else:
lowerCAmelCase : Union[str, Any] = image
lowerCAmelCase : List[Any] = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : str=False ):
lowerCAmelCase : Optional[int] = load_image(inputs['''image'''] )
lowerCAmelCase : Any = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
lowerCAmelCase : int = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Tuple ):
lowerCAmelCase : int = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int=5 ):
if top_k > self.model.config.num_labels:
lowerCAmelCase : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase : Optional[int] = model_outputs.logits.sigmoid()[0]
lowerCAmelCase : List[Any] = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowerCAmelCase : Union[str, Any] = scores.tolist()
lowerCAmelCase : Optional[int] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 713
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ):
lowerCAmelCase : Any = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True
lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
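# Classification used below: a connected undirected graph has an Euler circuit iff every
# vertex has even degree, and an Euler path iff exactly two vertices have odd degree;
# otherwise neither exists (return codes 1, 2 and 3 respectively).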
def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[Any] = -1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCAmelCase : Optional[Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ):
lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
lowerCAmelCase : Dict = 1
if check == 2:
lowerCAmelCase : int = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def _snake_case ( ):
lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCAmelCase : Any = {
1: [],
2: []
# all degree is zero
}
lowerCAmelCase : List[str] = 10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
| 637
| 0
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
snake_case__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
snake_case__ : List[str] = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = CamembertTokenizer
__UpperCamelCase = CamembertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def lowerCamelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Optional[Any] = CamembertTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : int = '''<pad>'''
lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(UpperCamelCase_ ) , 1_0_0_4 )
def lowerCamelCase__ ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Dict = CamembertTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
lowerCAmelCase : str = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase : Dict = tokenizer.encode(UpperCamelCase_ )
lowerCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowerCAmelCase : List[str] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
lowerCAmelCase : Dict = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase : Tuple = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Dict = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase : Any = tokenizer.tokenize(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowerCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ )
lowerCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : List[str] ):
# fmt: off
lowerCAmelCase : Dict = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
lowerCAmelCase : Any = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=UpperCamelCase_ , )
| 714
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# We go through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class snake_case_( a__ ):
__UpperCamelCase = False
class snake_case_( a__ ):
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 637
| 0
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Optional[int] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case__ : Any = {
'''allenai/led-base-16384''': 16_384,
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = LEDTokenizer
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Union[str, Any]="replace" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Optional[Any]="<mask>" , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : List[str]=True , **UpperCamelCase_ : List[str] , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowerCAmelCase : List[Any] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase : Any = add_prefix_space
lowerCAmelCase : int = pre_tok_class(**UpperCamelCase_ )
lowerCAmelCase : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase : int = '''post_processor'''
lowerCAmelCase : Union[str, Any] = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
if tokenizer_component_instance:
lowerCAmelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase : List[str] = tuple(state['''sep'''] )
if "cls" in state:
lowerCAmelCase : Optional[int] = tuple(state['''cls'''] )
lowerCAmelCase : str = False
if state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowerCAmelCase : Optional[int] = add_prefix_space
lowerCAmelCase : Tuple = True
if state.get('''trim_offsets''' , UpperCamelCase_ ) != trim_offsets:
lowerCAmelCase : List[str] = trim_offsets
lowerCAmelCase : Optional[int] = True
if changes_to_apply:
lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , state.pop('''type''' ) )
lowerCAmelCase : Optional[int] = component_class(**UpperCamelCase_ )
setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase__ ( self : Optional[Any] ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : int ):
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value
lowerCAmelCase : Optional[int] = value
def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str] ):
lowerCAmelCase : Union[str, Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple=None ):
lowerCAmelCase : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : int = [self.sep_token_id]
lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
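# LED does not make use of token type ids (it follows the BART/RoBERTa lineage, see the
# "Copied from ... bart" markers above), so a list of zeros covering the full
# special-token layout is returned in both branches.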
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : str = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : List[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : List[str] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCAmelCase : Union[str, Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : Tuple = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Optional[Any] = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : List[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy: ''' + str(self.padding_side ) )
return encoded_inputs
| 715
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case__ : List[Any] = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _snake_case ( ):
lowerCAmelCase : Optional[int] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCAmelCase : str = bs[:]
lowerCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
lowerCAmelCase : int = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case , _snake_case ) )
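# The function above maps each of the 256 byte values to a printable unicode character
# (reusing the byte itself where it is already printable), so byte-level BPE can operate
# on visible symbols and never needs an unknown token.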
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Optional[Any] = char
return pairs
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : Any = json.load(UpperCamelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase : List[Any] = bytes_to_unicode()
lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase : List[str] = tuple(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase, lowerCAmelCase : Any = bigram
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = 0
while i < len(UpperCamelCase_ ):
try:
lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Tuple = tuple(UpperCamelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ )
lowerCAmelCase : List[str] = word
return word
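# The loop above implements greedy BPE: repeatedly merge the adjacent symbol pair with the
# lowest merge rank until no pair of the word remains in `bpe_ranks`, then store the
# result so the `self.cache` lookup at the top of the method can short-circuit next time.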
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Dict = []
for token in re.findall(self.pat , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ):
return self.decoder.get(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
lowerCAmelCase : Optional[int] = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : UpperCamelCase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase : Tuple = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
lowerCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy: ''' + str(self.padding_side ) )
return encoded_inputs
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def _snake_case ( _snake_case : np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
lowerCAmelCase, lowerCAmelCase : int = np.shape(_snake_case )
if rows != columns:
lowerCAmelCase : Union[str, Any] = (
'''\'table\' has to be of square shaped array but got a '''
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_snake_case )
lowerCAmelCase : List[str] = np.zeros((rows, columns) )
lowerCAmelCase : str = np.zeros((rows, columns) )
for i in range(_snake_case ):
for j in range(_snake_case ):
lowerCAmelCase : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_snake_case ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
lowerCAmelCase : Any = (table[i][j] - total) / upper[j][j]
lowerCAmelCase : Optional[int] = 1
for j in range(_snake_case , _snake_case ):
lowerCAmelCase : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(_snake_case ) )
lowerCAmelCase : Any = table[i][j] - total
return lower, upper
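# Minimal usage sketch (the public name of the function above is assumed, since this
# snippet only defines it as _snake_case):
# lower, upper = lu_decomposition(np.array([[2.0, 1.0], [4.0, 3.0]]))
# gives lower = [[1, 0], [2, 1]] and upper = [[2, 1], [0, 1]], so lower @ upper reproduces
# the input (Doolittle decomposition: unit diagonal on the lower factor).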
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 4000000 ):
lowerCAmelCase : int = [0, 1]
lowerCAmelCase : List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCAmelCase : int = 0
for j in range(len(_snake_case ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
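# Sanity check for the logic above: for n = 100 the even-valued Fibonacci terms summed are
# 0, 2, 8 and 34, so the function returns 44.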
if __name__ == "__main__":
print(f"""{solution() = }""")
| 637
| 0
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _snake_case ( _snake_case : Optional[int] , _snake_case : Union[str, Any] ):
lowerCAmelCase : Optional[int] = k_size // 2
lowerCAmelCase : str = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase : int = 1 / (2 * pi * sigma) * exp(-(square(_snake_case ) + square(_snake_case )) / (2 * square(_snake_case )) )
return g
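# The kernel above is the sampled 2D Gaussian 1 / (2 * pi * sigma) * exp(-(x**2 + y**2) / (2 * sigma**2))
# evaluated on a k_size x k_size grid of integer offsets centred on zero (built with mgrid).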
def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : List[str] ):
lowerCAmelCase, lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase : Optional[Any] = height - k_size + 1
lowerCAmelCase : Any = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase : Dict = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase : Optional[int] = 0
for i, j in product(range(_snake_case ) , range(_snake_case ) ):
lowerCAmelCase : int = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase : Union[str, Any] = gen_gaussian_kernel(_snake_case , _snake_case )
lowerCAmelCase : List[str] = ravel(_snake_case )
# reshape and get the dst image
lowerCAmelCase : str = dot(_snake_case , _snake_case ).reshape(_snake_case , _snake_case ).astype(_snake_case )
return dst
# With the im2col layout above, filtering the whole image reduces to one matrix-vector
# product between the window rows and the flattened kernel.
if __name__ == "__main__":
# read original image
snake_case__ = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
snake_case__ = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
snake_case__ = gaussian_filter(gray, 3, sigma=1)
snake_case__ = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 717
|
"""simple docstring"""
def _snake_case ( _snake_case : float , _snake_case : list[float] ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
lowerCAmelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) )
return round(_snake_case , ndigits=2 )
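# NPV is the discounted sum of CF_i / (1 + r)**i with i starting at 0 for the initial
# flow. Quick check: discount_rate = 0.1 and cash_flows = [-100, 110] gives
# -100 + 110 / 1.1 = 0.0.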
if __name__ == "__main__":
import doctest
doctest.testmod()
| 637
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
snake_case__ : List[str] = logging.get_logger(__name__)
class snake_case_( a__ ):
def __init__( self : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Tuple ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 718
|
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : list[int] , _snake_case : int ):
if len(_snake_case ) == 0:
return False
lowerCAmelCase : List[Any] = len(_snake_case ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , _snake_case )
else:
return binary_search(a_list[midpoint + 1 :] , _snake_case )
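# Worked example: binary_search([1, 3, 5, 7], 5) compares against the midpoint value 5 and
# returns True immediately; each recursive call halves the slice, so the search is
# O(log n) on a list that must already be sorted in ascending order.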
if __name__ == "__main__":
snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip()
snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')]
snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip())
snake_case__ : str = '''''' if binary_search(sequence, target) else '''not '''
print(f"""{target} was {not_str}found in {sequence}""")
| 637
| 0
|
from string import ascii_uppercase
snake_case__ : Union[str, Any] = {str(ord(c) - 55): c for c in ascii_uppercase}
def _snake_case ( _snake_case : int , _snake_case : int ):
if isinstance(_snake_case , _snake_case ):
raise TypeError('''int() can\'t convert non-string with explicit base''' )
if num < 0:
raise ValueError('''parameter must be positive int''' )
if isinstance(_snake_case , _snake_case ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if isinstance(_snake_case , _snake_case ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if base in (0, 1):
raise ValueError('''base must be >= 2''' )
if base > 36:
raise ValueError('''base must be <= 36''' )
lowerCAmelCase : Optional[Any] = ''''''
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Dict = 0
while div != 1:
lowerCAmelCase, lowerCAmelCase : Tuple = divmod(_snake_case , _snake_case )
if base >= 11 and 9 < mod < 36:
lowerCAmelCase : int = ALPHABET_VALUES[str(_snake_case )]
else:
lowerCAmelCase : int = str(_snake_case )
new_value += actual_value
lowerCAmelCase : List[str] = num // base
lowerCAmelCase : List[Any] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_snake_case )
return str(new_value[::-1] )
return new_value[::-1]
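# Worked example: converting 255 to base 16 yields divmod(255, 16) = (15, 15), both
# remainders map to 'F' via ALPHABET_VALUES, so the function returns 'FF'.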
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 719
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
snake_case__ : Optional[Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def _snake_case ( _snake_case : Any ):
lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case )
lowerCAmelCase : str = TestCommand(*_snake_case )
test_command.run()
lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' )
assert os.path.exists(_snake_case )
lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case )
lowerCAmelCase : List[str] = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case )
if key == "num_bytes":
assert is_apercent_close(_snake_case , _snake_case )
elif key == "splits":
assert list(_snake_case ) == list(_snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 637
| 0
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
snake_case__ : List[Any] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
snake_case__ : Union[str, Any] = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
snake_case__ : Tuple = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ ( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=None ):
return {
"matthews_correlation": float(matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ ) ),
}
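A minimal usage sketch of the underlying sklearn call, mirroring Example 1 from the docstring above:

from sklearn.metrics import matthews_corrcoef

refs = [1, 3, 2, 0, 3, 2]
preds = [1, 2, 2, 0, 3, 3]
# matthews_corrcoef handles multiclass labels directly; sample_weight is optional.
print(round(matthews_corrcoef(refs, preds), 2))  # 0.54, as in Example 1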
| 720
|
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ):
return base * power(_snake_case , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip())
snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip())
snake_case__ : Any = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
snake_case__ : Dict = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 637
| 0
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : int = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class snake_case_( a__ ):
__UpperCamelCase = '''efficientformer'''
def __init__( self : int , UpperCamelCase_ : List[int] = [3, 2, 6, 4] , UpperCamelCase_ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , UpperCamelCase_ : List[bool] = [True, True, True, True] , UpperCamelCase_ : int = 4_4_8 , UpperCamelCase_ : int = 3_2 , UpperCamelCase_ : int = 4 , UpperCamelCase_ : int = 7 , UpperCamelCase_ : int = 5 , UpperCamelCase_ : int = 8 , UpperCamelCase_ : int = 4 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_6 , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 2 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : float = 1E-5 , UpperCamelCase_ : str = "gelu" , UpperCamelCase_ : float = 0.02 , UpperCamelCase_ : float = 1E-12 , UpperCamelCase_ : int = 2_2_4 , UpperCamelCase_ : float = 1E-05 , **UpperCamelCase_ : List[Any] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = hidden_sizes
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Optional[int] = patch_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : Optional[Any] = mlp_expansion_ratio
lowerCAmelCase : Optional[int] = downsamples
lowerCAmelCase : List[str] = dim
lowerCAmelCase : List[Any] = key_dim
lowerCAmelCase : Tuple = attention_ratio
lowerCAmelCase : int = resolution
lowerCAmelCase : Tuple = pool_size
lowerCAmelCase : str = downsample_patch_size
lowerCAmelCase : List[Any] = downsample_stride
lowerCAmelCase : Optional[Any] = downsample_pad
lowerCAmelCase : List[str] = drop_path_rate
lowerCAmelCase : Tuple = num_metaad_blocks
lowerCAmelCase : str = distillation
lowerCAmelCase : int = use_layer_scale
lowerCAmelCase : Any = layer_scale_init_value
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : int = batch_norm_eps
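Assuming this config class is exposed as transformers.EfficientFormerConfig (as the archive map above suggests), a minimal instantiation sketch:

from transformers import EfficientFormerConfig

# Any keyword from the __init__ signature above can be overridden; the rest keep their defaults.
config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
print(config.model_type)  # "efficientformer"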
| 721
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : int = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ):
if attention_mask is None:
lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class snake_case_:
def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ):
lowerCAmelCase : Tuple = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Union[str, Any] = eos_token_id
lowerCAmelCase : Dict = pad_token_id
lowerCAmelCase : Optional[Any] = bos_token_id
lowerCAmelCase : List[str] = initializer_range
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ):
lowerCAmelCase : int = 2_0
lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = 2_0
lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class snake_case_( unittest.TestCase ):
__UpperCamelCase = 99
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase : List[Any] = input_ids.shape[0]
lowerCAmelCase : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data()
lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ )
lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
lowerCAmelCase : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class snake_case_( a__ , unittest.TestCase , a__ ):
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Any = FlaxBlenderbotModelTester(self )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : List[Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCAmelCase : List[Any] = ['''Sam''']
lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.'''
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
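shift_tokens_right, used throughout these tests, prepends the decoder start token and drops the last position; a simplified numpy re-statement of that behavior (my own sketch, not the library function, and it omits the library's label-masking handling):

import numpy as np

def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.full_like(input_ids, pad_token_id)
    shifted[:, 1:] = input_ids[:, :-1]  # everything moves one slot to the right
    shifted[:, 0] = decoder_start_token_id
    return shifted

ids = np.array([[71, 82, 18, 2]])
print(shift_right(ids, pad_token_id=1, decoder_start_token_id=2))  # [[ 2 71 82 18]]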
| 637
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = BioGptTokenizer
__UpperCamelCase = False
def lowerCamelCase__ ( self : Union[str, Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowerCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[str] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase : List[str] = '''lower newer'''
lowerCAmelCase : List[str] = '''lower newer'''
return input_text, output_text
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase : List[str] = '''lower'''
lowerCAmelCase : Tuple = ['''low''', '''er</w>''']
lowerCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = tokens + ['''<unk>''']
lowerCAmelCase : Tuple = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowerCAmelCase : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
lowerCAmelCase : Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
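The expected tokenization of "lower" follows directly from the merge table in setUp: "l o" and "lo w" build "low", and "e r</w>" yields "er</w>". A minimal greedy-merge sketch (my own helper, not the tokenizer's internals):

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # rules in priority order, as in setUp

def bpe(word):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if (symbols[i], symbols[i + 1]) == (a, b):
                symbols[i : i + 2] = [a + b]  # merge the adjacent pair
            else:
                i += 1
    return symbols

print(bpe("lower"))  # ['low', 'er</w>'], matching the test above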
| 700
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ : int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _snake_case ( _snake_case : list[list[int]] ):
lowerCAmelCase : Union[str, Any] = []
for i in range(len(_snake_case ) ):
lowerCAmelCase : Any = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCAmelCase : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_snake_case ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_snake_case ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCAmelCase : str = cells[i][j] == 1
                if (alive and 2 <= neighbour_count <= 3) or (
                    not alive and neighbour_count == 3
                ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_snake_case )
return next_generation
def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ):
lowerCAmelCase : int = []
for _ in range(_snake_case ):
# Create output image
lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) )
lowerCAmelCase : Union[str, Any] = img.load()
# Save cells to image
for x in range(len(_snake_case ) ):
for y in range(len(cells[0] ) ):
lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255
lowerCAmelCase : List[Any] = (colour, colour, colour)
# Save image
images.append(_snake_case )
lowerCAmelCase : Union[str, Any] = new_generation(_snake_case )
return images
if __name__ == "__main__":
snake_case__ : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
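A quick sanity check of the update rule on the blinker defined above: one step turns the vertical bar horizontal, and two steps restore it (a compact re-statement of the same neighbour-count rule, with my own names):

def step(cells):
    # One Game of Life update on a bounded grid (no wrap-around).
    h, w = len(cells), len(cells[0])
    nxt = [[0] * w for _ in range(h)]
    for i in range(h):
        for j in range(w):
            n = sum(
                cells[x][y]
                for x in range(max(0, i - 1), min(h, i + 2))
                for y in range(max(0, j - 1), min(w, j + 2))
                if (x, y) != (i, j)
            )
            nxt[i][j] = 1 if n == 3 or (cells[i][j] and n == 2) else 0
    return nxt

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert step(blinker) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert step(step(blinker)) == blinker  # period-2 oscillator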
| 637
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_( a__ ):
__UpperCamelCase = '''vit_mae'''
def __init__( self : Tuple , UpperCamelCase_ : List[Any]=7_6_8 , UpperCamelCase_ : List[str]=1_2 , UpperCamelCase_ : Tuple=1_2 , UpperCamelCase_ : List[Any]=3_0_7_2 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : Tuple=1E-12 , UpperCamelCase_ : Optional[int]=2_2_4 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=1_6 , UpperCamelCase_ : str=5_1_2 , UpperCamelCase_ : Union[str, Any]=8 , UpperCamelCase_ : Dict=2_0_4_8 , UpperCamelCase_ : Optional[int]=0.75 , UpperCamelCase_ : Tuple=False , **UpperCamelCase_ : Any , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Union[str, Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : str = num_channels
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : Optional[Any] = decoder_num_attention_heads
lowerCAmelCase : int = decoder_hidden_size
lowerCAmelCase : Any = decoder_num_hidden_layers
lowerCAmelCase : Any = decoder_intermediate_size
lowerCAmelCase : List[str] = mask_ratio
lowerCAmelCase : int = norm_pix_loss
| 701
|
"""simple docstring"""
from __future__ import annotations
class snake_case_:
def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ):
lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self : Dict ):
# searches pattern in text and returns index positions
lowerCAmelCase : Union[str, Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ )
if mismatch_index == -1:
positions.append(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
                lowerCAmelCase : int = (
                    mismatch_index - match_index
                )  # shift distance given by the bad character heuristic
return positions
snake_case__ : str = '''ABAABA'''
snake_case__ : List[str] = '''AB'''
snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern)
snake_case__ : Optional[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
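For comparison, the positions reported above can be cross-checked with a naive scan; for text "ABAABA" and pattern "AB" both approaches should report [0, 3]:

text, pattern = "ABAABA", "AB"
naive = [
    i
    for i in range(len(text) - len(pattern) + 1)
    if text[i : i + len(pattern)] == pattern
]
print(naive)  # [0, 3]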
| 637
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class snake_case_:
__UpperCamelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
__UpperCamelCase = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
__UpperCamelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
__UpperCamelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__UpperCamelCase = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
__UpperCamelCase = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
__UpperCamelCase = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
__UpperCamelCase = field(
default=10_000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
__UpperCamelCase = field(default=2e-4 , metadata={'''help''': '''Learning rate for training.'''} )
__UpperCamelCase = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} )
__UpperCamelCase = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
__UpperCamelCase = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
__UpperCamelCase = field(
default=a__ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
__UpperCamelCase = field(default=50_000 , metadata={'''help''': '''Maximum number of training steps.'''} )
__UpperCamelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__UpperCamelCase = field(default=1_024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
__UpperCamelCase = field(default=1 , metadata={'''help''': '''Training seed.'''} )
__UpperCamelCase = field(
default=1_024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
__UpperCamelCase = field(
default=a__ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
__UpperCamelCase = field(default=a__ , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class snake_case_:
__UpperCamelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__UpperCamelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__UpperCamelCase = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
__UpperCamelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__UpperCamelCase = field(default=1_024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
__UpperCamelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class snake_case_:
__UpperCamelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__UpperCamelCase = field(default=a__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
__UpperCamelCase = field(
default=a__ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
__UpperCamelCase = field(
default=a__ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
__UpperCamelCase = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
__UpperCamelCase = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
__UpperCamelCase = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
__UpperCamelCase = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
__UpperCamelCase = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
__UpperCamelCase = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
__UpperCamelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
__UpperCamelCase = field(
default='''eval_results.json''' , metadata={'''help''': '''Name of the file to save evaluation results to.'''} )
__UpperCamelCase = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
__UpperCamelCase = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class snake_case_:
__UpperCamelCase = field(
default=a__ , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
__UpperCamelCase = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
__UpperCamelCase = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
__UpperCamelCase = field(
default=100_000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
__UpperCamelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__UpperCamelCase = field(
default=1_000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
__UpperCamelCase = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
__UpperCamelCase = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
__UpperCamelCase = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
__UpperCamelCase = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
__UpperCamelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
__UpperCamelCase = field(
default=a__ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
__UpperCamelCase = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class snake_case_:
__UpperCamelCase = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
__UpperCamelCase = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
__UpperCamelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__UpperCamelCase = field(default=200_000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
__UpperCamelCase = field(
default=32_768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
__UpperCamelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
__UpperCamelCase = field(default=a__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class snake_case_:
__UpperCamelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
__UpperCamelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
__UpperCamelCase = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
__UpperCamelCase = field(default=a__ , metadata={'''help''': '''Number of workers used for pretokenization.'''} )
@dataclass
class snake_case_:
__UpperCamelCase = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
__UpperCamelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
__UpperCamelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
__UpperCamelCase = field(default=a__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
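These dataclasses are typically consumed through transformers.HfArgumentParser; a minimal sketch of the pattern with a small stand-in dataclass (DemoArgs is mine, not from the source):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size for training."})

parser = HfArgumentParser(DemoArgs)
(args,) = parser.parse_args_into_dataclasses(["--train_batch_size", "8"])
print(args.train_batch_size)  # 8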
| 702
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_( a__ ):
pass
class snake_case_:
def __init__( self : Any , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = data
lowerCAmelCase : Node | None = None
def __iter__( self : int ):
lowerCAmelCase : Any = self
lowerCAmelCase : Union[str, Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase_ )
yield node.data
lowerCAmelCase : Optional[int] = node.next_node
@property
def lowerCamelCase__ ( self : str ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
snake_case__ : Dict = Node(1)
snake_case__ : Any = Node(2)
snake_case__ : int = Node(3)
snake_case__ : Any = Node(4)
print(root_node.has_loop) # False
snake_case__ : Tuple = root_node.next_node
print(root_node.has_loop) # True
snake_case__ : List[Any] = Node(5)
snake_case__ : int = Node(6)
snake_case__ : List[Any] = Node(5)
snake_case__ : Dict = Node(6)
print(root_node.has_loop) # False
snake_case__ : Any = Node(1)
print(root_node.has_loop) # False
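The visited-list check above costs O(n) extra memory; Floyd's tortoise-and-hare finds the same loops in O(1) space (a sketch with a minimal node class of my own):

class N:
    def __init__(self, data):
        self.data, self.next_node = data, None

def has_loop(head) -> bool:
    # The fast pointer eventually laps the slow one iff the list contains a cycle.
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False

a, b, c = N(1), N(2), N(3)
a.next_node, b.next_node = b, c
assert not has_loop(a)
c.next_node = a  # close the loop
assert has_loop(a)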
| 637
| 0
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
snake_case__ : List[str] = '''.'''
if __name__ == "__main__":
snake_case__ : Any = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
snake_case__ : Dict = []
snake_case__ : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
snake_case__ : Optional[int] = line.strip()
snake_case__ : Union[str, Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
snake_case__ : str = '''\n'''.join(non_existent_paths)
raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 703
|
"""simple docstring"""
from torch import nn
class snake_case_( nn.Module ):
def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ):
super().__init__()
lowerCAmelCase : str = class_size
lowerCAmelCase : Dict = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCAmelCase : int = self.mlp(UpperCamelCase_ )
return logits
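A usage sketch: the head is a single linear layer mapping a hidden state of size embed_size to class logits (sizes below are illustrative):

import torch
from torch import nn

head = nn.Linear(768, 5)  # stands in for the single-layer head above (embed_size=768, class_size=5)
hidden_state = torch.randn(2, 768)  # (batch, embed_size)
logits = head(hidden_state)
print(logits.shape)  # torch.Size([2, 5])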
| 637
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
snake_case__ : str = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
snake_case__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704
|
"""simple docstring"""
class snake_case_:
def __init__( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Dict = val
lowerCAmelCase : str = None
lowerCAmelCase : Dict = None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ):
if self.val:
if val < self.val:
if self.left is None:
lowerCAmelCase : int = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowerCAmelCase : Any = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = val
def _snake_case ( _snake_case : Tuple , _snake_case : str ):
# Recursive traversal
if root:
inorder(root.left , _snake_case )
res.append(root.val )
inorder(root.right , _snake_case )
def _snake_case ( _snake_case : Optional[Any] ):
# Build BST
if len(_snake_case ) == 0:
return arr
lowerCAmelCase : Optional[Any] = Node(arr[0] )
for i in range(1 , len(_snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowerCAmelCase : Optional[int] = []
inorder(_snake_case , _snake_case )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
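Note that the insert above overwrites equal keys, so duplicates are silently dropped: sorting [3, 1, 3, 2] yields [1, 2, 3], not [1, 2, 3, 3]. A quick self-contained check of the equivalent behaviour:

# An in-order walk of a BST that overwrites on equal keys matches sorted(set(...)).
print(sorted(set([3, 1, 3, 2])))  # [1, 2, 3]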
| 637
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = ShapEImgaImgPipeline
__UpperCamelCase = ['''image''']
__UpperCamelCase = ['''image''']
__UpperCamelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__UpperCamelCase = False
@property
def lowerCamelCase__ ( self : Dict ):
return 3_2
@property
def lowerCamelCase__ ( self : List[str] ):
return 3_2
@property
def lowerCamelCase__ ( self : Optional[int] ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return 8
@property
def lowerCamelCase__ ( self : Dict ):
torch.manual_seed(0 )
lowerCAmelCase : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowerCAmelCase : Optional[Any] = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_2_4 , )
return image_processor
@property
def lowerCamelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCAmelCase : List[str] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCAmelCase : List[Any] = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCamelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase : Any = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[int] = self.dummy_prior
lowerCAmelCase : str = self.dummy_image_encoder
lowerCAmelCase : Optional[Any] = self.dummy_image_processor
lowerCAmelCase : Tuple = self.dummy_renderer
lowerCAmelCase : List[str] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
lowerCAmelCase : Optional[Any] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=0 ):
lowerCAmelCase : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCAmelCase : int = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = '''cpu'''
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : str = self.pipeline_class(**UpperCamelCase_ )
lowerCAmelCase : Any = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : int = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowerCAmelCase : Tuple = output.images[0]
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowerCAmelCase : List[Any] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = torch_device == '''cpu'''
lowerCAmelCase : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase : int = self.pipeline_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Dict = 1
lowerCAmelCase : List[Any] = 2
lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase : Dict = batch_size * [inputs[key]]
lowerCAmelCase : int = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCAmelCase : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCAmelCase : int = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCAmelCase : Any = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowerCAmelCase : str = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 705
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case_( a__ ):
__UpperCamelCase = '''levit'''
def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Tuple = image_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[int] = kernel_size
lowerCAmelCase : Dict = stride
lowerCAmelCase : List[Any] = padding
lowerCAmelCase : Dict = hidden_sizes
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Tuple = depths
lowerCAmelCase : Dict = key_dim
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Tuple = attention_ratio
lowerCAmelCase : Optional[int] = mlp_ratio
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : List[str] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class snake_case_( a__ ):
__UpperCamelCase = version.parse('''1.11''' )
@property
def lowerCamelCase__ ( self : Tuple ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return 1E-4
| 637
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : list ):
_enforce_args(_snake_case , _snake_case )
if n == 0:
return 0
lowerCAmelCase : List[str] = float('''-inf''' )
for i in range(1 , n + 1 ):
lowerCAmelCase : int = max(
_snake_case , prices[i - 1] + naive_cut_rod_recursive(n - i , _snake_case ) )
return max_revue
def _snake_case ( _snake_case : int , _snake_case : list ):
_enforce_args(_snake_case , _snake_case )
lowerCAmelCase : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(_snake_case , _snake_case , _snake_case )
def _snake_case ( _snake_case : int , _snake_case : list , _snake_case : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCAmelCase : List[str] = float('''-inf''' )
for i in range(1 , n + 1 ):
lowerCAmelCase : Union[str, Any] = max(
_snake_case , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _snake_case , _snake_case ) , )
lowerCAmelCase : Optional[int] = max_revenue
return max_rev[n]
def _snake_case ( _snake_case : int , _snake_case : list ):
_enforce_args(_snake_case , _snake_case )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowerCAmelCase : int = [float('''-inf''' ) for _ in range(n + 1 )]
lowerCAmelCase : Union[str, Any] = 0
for i in range(1 , n + 1 ):
lowerCAmelCase : Any = max_rev[i]
for j in range(1 , i + 1 ):
lowerCAmelCase : Dict = max(_snake_case , prices[j - 1] + max_rev[i - j] )
lowerCAmelCase : Optional[Any] = max_revenue_i
return max_rev[n]
def _snake_case ( _snake_case : int , _snake_case : list ):
if n < 0:
lowerCAmelCase : Dict = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(_snake_case )
if n > len(_snake_case ):
lowerCAmelCase : Dict = (
'''Each integral piece of rod must have a corresponding price. '''
f'''Got n = {n} but length of prices = {len(_snake_case )}'''
)
raise ValueError(_snake_case )
def _snake_case ( ):
lowerCAmelCase : List[str] = [6, 10, 12, 15, 20, 23]
lowerCAmelCase : Tuple = len(_snake_case )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowerCAmelCase : List[str] = 36
lowerCAmelCase : Dict = top_down_cut_rod(_snake_case , _snake_case )
lowerCAmelCase : List[str] = bottom_up_cut_rod(_snake_case , _snake_case )
lowerCAmelCase : List[str] = naive_cut_rod_recursive(_snake_case , _snake_case )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
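# Worked example of the bottom-up recurrence max_rev[i] = max(prices[j - 1] + max_rev[i - j])
# over 1 <= j <= i (illustrative; assumes bottom_up_cut_rod is the iterative
# function defined above, as main() does):
#
# prices = [1, 5, 8, 9]          # price of a rod piece of length 1..4
# bottom_up_cut_rod(4, prices)   # -> 10, from two pieces of length 2 (5 + 5)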
| 706
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
lowerCAmelCase : str = 3
lowerCAmelCase : Tuple = 2_5_0
lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
lowerCAmelCase : Union[str, Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Dict = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : str ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
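# Illustrative generation-time usage of the criteria exercised above (sketch;
# model and input_ids are placeholders, the criteria classes are the real ones):
#
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# output_ids = model.generate(input_ids, stopping_criteria=criteria)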
| 637
| 0
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class snake_case_:
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] ):
raise NotImplementedError()
def lowerCamelCase__ ( self : List[Any] ):
raise NotImplementedError()
class snake_case_( a__ ):
def __init__( self : Optional[int] , UpperCamelCase_ : "AutoTokenizer" , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = tokenizer
lowerCAmelCase : str = skip_prompt
lowerCAmelCase : Dict = decode_kwargs
# variables used in the streaming process
lowerCAmelCase : List[Any] = []
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = True
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
lowerCAmelCase : Optional[Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowerCAmelCase : Optional[int] = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
lowerCAmelCase : List[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
lowerCAmelCase : Tuple = text[self.print_len :]
lowerCAmelCase : List[Any] = []
lowerCAmelCase : str = 0
# If the last token is a CJK character, we print the characters.
elif len(UpperCamelCase_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowerCAmelCase : Tuple = text[self.print_len :]
self.print_len += len(UpperCamelCase_ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowerCAmelCase : Dict = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(UpperCamelCase_ )
self.on_finalized_text(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
lowerCAmelCase : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowerCAmelCase : Optional[int] = text[self.print_len :]
lowerCAmelCase : Dict = []
lowerCAmelCase : Tuple = 0
else:
lowerCAmelCase : List[str] = ''''''
lowerCAmelCase : Tuple = True
self.on_finalized_text(UpperCamelCase_ , stream_end=UpperCamelCase_ )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : bool = False ):
print(UpperCamelCase_ , flush=UpperCamelCase_ , end='''''' if not stream_end else None )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
class snake_case_( a__ ):
def __init__( self : Any , UpperCamelCase_ : "AutoTokenizer" , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[float] = None , **UpperCamelCase_ : List[str] ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = Queue()
lowerCAmelCase : Dict = None
lowerCAmelCase : Dict = timeout
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : bool = False ):
self.text_queue.put(UpperCamelCase_ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Union[str, Any] ):
return self
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
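# Illustrative consumer-side sketch for the iterator streamer above (assumes the
# class is transformers' TextIteratorStreamer; model/tokenizer are placeholders):
#
# from threading import Thread
# streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=10.0)
# Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
# for new_text in streamer:  # blocks on the queue; ends when the stop signal arrives
#     print(new_text, end="")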
| 707
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = None
def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase : List[Any] = []
for i in range(_snake_case ):
lowerCAmelCase : int = i / num_diffusion_timesteps
lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
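# Quick sanity check for the cosine schedule above (illustrative; the helper is
# obfuscated here but mirrors diffusers' betas_for_alpha_bar):
#
# betas = betas_for_alpha_bar(1000)               # 1000 betas, each capped at max_beta
# alphas_cumprod = torch.cumprod(1.0 - betas, 0)  # monotonically decreasing in t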
class snake_case_( a__ , a__ ):
@register_to_config
def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ )
lowerCAmelCase : str = 1.0 - self.betas
lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
lowerCAmelCase : Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowerCAmelCase : Any = 1.0
# setable values
lowerCAmelCase : Any = None
lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() )
lowerCAmelCase : List[str] = variance_type
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ):
return sample
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ):
lowerCAmelCase : Any = num_inference_steps
lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ):
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : int = self.alphas_cumprod[t]
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : Tuple = self.betas[t]
else:
lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) )
lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase : Optional[Any] = variance.log()
lowerCAmelCase : Union[str, Any] = beta.log()
lowerCAmelCase : Dict = (predicted_variance + 1) / 2
lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase : Optional[int] = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : int = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : List[Any] = self.betas[t]
lowerCAmelCase : Optional[int] = self.alphas[t]
else:
lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : Tuple = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Dict = torch.clamp(
UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase : int = 0
if t > 0:
lowerCAmelCase : Union[str, Any] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device )
lowerCAmelCase : Any = self._get_variance(
UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase : str = variance
elif self.variance_type == "learned_range":
lowerCAmelCase : Optional[Any] = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
lowerCAmelCase : List[Any] = variance * variance_noise
lowerCAmelCase : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase : int = timesteps.to(original_samples.device )
lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase : str = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
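# add_noise above applies the closed-form forward process
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. Illustrative sketch
# (assumes the class above is diffusers' UnCLIPScheduler):
#
# scheduler = UnCLIPScheduler()
# noise = torch.randn_like(clean_sample)
# timesteps = torch.randint(0, 1000, (clean_sample.shape[0],))
# noisy = scheduler.add_noise(clean_sample, noise, timesteps)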
| 637
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ : List[str] = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
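# With the _LazyModule pattern above, heavyweight submodules load on first
# attribute access rather than at package import time, e.g. (illustrative):
#
# from transformers.models.cpmant import CpmAntModel  # triggers the torch-gated import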
| 708
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case_:
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[int] = num_choices
lowerCAmelCase : Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : int = None
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
lowerCAmelCase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Dict = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and next attention mask
lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
lowerCAmelCase : str = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
),
) : Tuple = config_and_inputs
lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = LlamaModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase : str = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[str] = 3
lowerCAmelCase : List[str] = input_dict['''input_ids''']
lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : int = '''single_label_classification'''
lowerCAmelCase : Tuple = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = 3
lowerCAmelCase : Dict = '''multi_label_classification'''
lowerCAmelCase : Union[str, Any] = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0}
lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state
lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
@require_torch
class snake_case_( unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same; once we fix the instabilities, we will update!''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same; once we fix the instabilities, we will update!''' )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same; once we fix the instabilities, we will update!''' )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        '''Logits are not exactly the same; once we fix the instabilities, we will update! Also this is going to be a `too_slow` test''' )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase : int = '''Simply put, the theory of relativity states that '''
lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
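# Illustrative RoPE-scaling sketch matching the dict format used in the
# parameterized test above (the factor value here is an arbitrary example):
#
# config.rope_scaling = {"type": "linear", "factor": 2.0}
# model = LlamaModel(config)  # positions beyond the original window are rescaled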
| 637
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Any = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : int = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
snake_case__ : Optional[Any] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BigBirdTokenizer
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = []
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
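# Resulting layouts from build_inputs_with_special_tokens above (illustrative):
#
# single sequence: [CLS] A [SEP]          -> token_type_ids all 0
# pair:            [CLS] A [SEP] B [SEP]  -> 0s over the first segment, 1s over "B [SEP]"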
| 709
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ):
lowerCAmelCase : Dict = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ):
lowerCAmelCase : Optional[int] = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' )
torch.save(scheduler.state_dict() , _snake_case )
lowerCAmelCase : List[Any] = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : List[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase : Optional[int] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase : Any = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , )
for _ in range(1_0_0_0 ):
lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class snake_case_( unittest.TestCase ):
__UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
__UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
__UpperCamelCase = 10
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
        # scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
lowerCAmelCase : Optional[Any] = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data
lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps )
self.assertListAlmostEqual(
UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule
lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' )
class snake_case_:
def __init__( self : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Tuple = fn
def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ):
return self.fn(*UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
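# The wrapper above swaps each lr_lambda closure for a picklable callable so the
# schedule survives torch.save/torch.load; illustrative sketch:
#
# scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
# LambdaScheduleWrapper.wrap_scheduler(scheduler)
# torch.save(scheduler.state_dict(), "schedule.bin")  # now safe to serialize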
| 637
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = PhobertTokenizer
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[int] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase : str = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
lowerCAmelCase : Dict = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : Tuple = ['''#version: 0.2''', '''l à</w>''']
lowerCAmelCase : Any = {'''unk_token''': '''<unk>'''}
lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[str] , **UpperCamelCase_ : Tuple ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = '''Tôi là VinAI Research'''
lowerCAmelCase : str = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase : int = '''Tôi là VinAI Research'''
lowerCAmelCase : Any = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
lowerCAmelCase : Tuple = tokenizer.tokenize(UpperCamelCase_ )
print(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = tokens + [tokenizer.unk_token]
lowerCAmelCase : Tuple = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
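# In the BPE output above, "@@" marks a piece that continues into the next one,
# so "T@@ ô@@ i" re-joins to "Tôi". With the toy vocab from setUp (ids offset by
# the 4 built-in specials <s>/<pad>/</s>/<unk>), e.g. (illustrative):
#
# tokenizer.convert_tokens_to_ids(["T@@", "i"])  # -> [4, 5]; unknown pieces map to 3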
| 710
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
__UpperCamelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase = '''summarizer'''
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = ['''text''']
__UpperCamelCase = ['''text''']
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
return self.model.generate(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
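# Illustrative end-to-end call (the tool class above is obfuscated; upstream it
# is TextSummarizationTool, so the name is an assumption):
#
# tool = TextSummarizationTool()
# summary = tool("Long English text to condense ...")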
| 637
| 0
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _snake_case ( _snake_case : int , _snake_case : List[Any] , _snake_case : Union[str, Any] ) -> List[str]:
lowerCAmelCase : int = hf_hub_url(repo_id=_snake_case , path=_snake_case , revision=_snake_case )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_snake_case )}'''
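# Concrete instance of the asserted URL format (illustrative):
#
# hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
# # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"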
| 711
|
"""simple docstring"""
snake_case__ : List[Any] = '''Tobias Carryer'''
from time import time
class snake_case_:
def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008
lowerCAmelCase : str = multiplier
lowerCAmelCase : Optional[int] = increment
lowerCAmelCase : Optional[Any] = modulo
lowerCAmelCase : Optional[Any] = seed
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
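# The class implements the classic recurrence X_{n+1} = (a * X_n + c) mod m; with
# the constants used in the demo below (a=1_664_525, c=1_013_904_223, m=2 << 31)
# the stream is deterministic for a fixed seed. Illustrative:
#
# lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, 42)
# lcg.next_number()  # same first value on every run with seed 42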
if __name__ == "__main__":
# Show the LCG in action.
snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 637
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case__ : List[Any] = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _snake_case ( ):
lowerCAmelCase : Optional[int] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCAmelCase : str = bs[:]
lowerCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
lowerCAmelCase : int = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case , _snake_case ) )
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Optional[Any] = char
return pairs
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : Any = json.load(UpperCamelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase : List[Any] = bytes_to_unicode()
lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase : List[str] = tuple(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase, lowerCAmelCase : Any = bigram
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = 0
while i < len(UpperCamelCase_ ):
try:
lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Tuple = tuple(UpperCamelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ )
lowerCAmelCase : List[str] = word
return word
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Dict = []
for token in re.findall(self.pat , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ):
return self.decoder.get(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
lowerCAmelCase : Optional[int] = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase : Tuple = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
lowerCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 712
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Any = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : int = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
snake_case__ : Optional[Any] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BigBirdTokenizer
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = []
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 637
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 4000000 ):
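# Project Euler 2: sum the even-valued Fibonacci terms that do not exceed n.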
lowerCAmelCase : int = [0, 1]
lowerCAmelCase : List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCAmelCase : int = 0
for j in range(len(_snake_case ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 713
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ):
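# Append u to the path, then follow each unvisited edge, marking it as used in both directions.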
lowerCAmelCase : Any = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
visited_edge[u][v], visited_edge[v][u] = True, True
lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ):
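# Count vertices of odd degree: 0 odd vertices means an Euler circuit, 2 means an Euler path, anything else means neither.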
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[Any] = -1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCAmelCase : Optional[Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ):
lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
lowerCAmelCase : Dict = 1
if check == 2:
lowerCAmelCase : int = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def _snake_case ( ):
lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCAmelCase : Any = {
1: [],
2: []
# all degree is zero
}
lowerCAmelCase : List[str] = 10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
| 637
| 0
|
"""simple docstring"""
from math import ceil
def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any] ):
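# Validate a device_map: every attention block must be assigned to exactly one device.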
lowerCAmelCase : str = list(range(0 , _snake_case ) )
lowerCAmelCase : List[str] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_snake_case ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_snake_case )
# Missing blocks
lowerCAmelCase : Optional[Any] = [i for i in blocks if i not in device_map_blocks]
lowerCAmelCase : int = [i for i in device_map_blocks if i not in blocks]
if len(_snake_case ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(_snake_case ) )
if len(_snake_case ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(_snake_case ) )
if len(_snake_case ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(_snake_case ) )
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
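# Partition n_layers into contiguous, ceil-sized chunks, one chunk per device.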
lowerCAmelCase : Any = list(range(_snake_case ) )
lowerCAmelCase : Optional[int] = int(ceil(n_layers / len(_snake_case ) ) )
lowerCAmelCase : Any = [layers[i : i + n_blocks] for i in range(0 , _snake_case , _snake_case )]
return dict(zip(_snake_case , _snake_case ) )
| 714
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class snake_case_( a__ ):
__UpperCamelCase = False
class snake_case_( a__ ):
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 637
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : list[int] ):
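# Mean absolute deviation: the average absolute difference between each value and the mean of the list.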
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
lowerCAmelCase : int = sum(_snake_case ) / len(_snake_case ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case__ : List[Any] = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _snake_case ( ):
lowerCAmelCase : Optional[int] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCAmelCase : str = bs[:]
lowerCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
lowerCAmelCase : int = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case , _snake_case ) )
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Optional[Any] = char
return pairs
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : Any = json.load(UpperCamelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase : List[Any] = bytes_to_unicode()
lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase : List[str] = tuple(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase, lowerCAmelCase : Any = bigram
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = 0
while i < len(UpperCamelCase_ ):
try:
lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Tuple = tuple(UpperCamelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ )
lowerCAmelCase : List[str] = word
return word
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Dict = []
for token in re.findall(self.pat , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ):
return self.decoder.get(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
lowerCAmelCase : Optional[int] = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase : Tuple = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
lowerCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 637
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int ) -> Any:
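# Modular exponentiation by repeated squaring, using O(log exponent) multiplications.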
if exponent == 1:
return base
if exponent % 2 == 0:
lowerCAmelCase : Dict = _modexpt(_snake_case , exponent // 2 , _snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(_snake_case , exponent - 1 , _snake_case )) % modulo_value
def _snake_case ( _snake_case : int = 1777 , _snake_case : int = 1855 , _snake_case : int = 8 ) -> Union[str, Any]:
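# Project Euler 188: last `digits` digits of the hyperexponentiation base↑↑height, computed by iterated modular exponentiation.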
lowerCAmelCase : Union[str, Any] = base
for _ in range(1 , _snake_case ):
lowerCAmelCase : Optional[Any] = _modexpt(_snake_case , _snake_case , 10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 716
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 4000000 ):
lowerCAmelCase : int = [0, 1]
lowerCAmelCase : List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCAmelCase : int = 0
for j in range(len(_snake_case ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 637
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : List[str] ):
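# Rewrite a checkpoint key: subtract the patch-embedding offset from the block number and swap original_name for new_name.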
lowerCAmelCase : Tuple = original_name.split('''.''' )[0]
lowerCAmelCase : Optional[int] = key.split('''.''' )
lowerCAmelCase : Tuple = int(key_list[key_list.index(_snake_case ) - 2] )
lowerCAmelCase : List[str] = int(key_list[key_list.index(_snake_case ) - 1] )
lowerCAmelCase : Optional[int] = orig_block_num - offset
lowerCAmelCase : Tuple = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def _snake_case ( _snake_case : Optional[int] ):
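# Translate the original PoolFormer state_dict keys into the HuggingFace PoolFormer naming scheme.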
lowerCAmelCase : Union[str, Any] = OrderedDict()
lowerCAmelCase, lowerCAmelCase : List[Any] = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowerCAmelCase : List[Any] = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowerCAmelCase : Optional[int] = key[: key.find('''proj''' )]
lowerCAmelCase : Union[str, Any] = key.replace(_snake_case , f'''patch_embeddings.{total_embed_found}.''' )
lowerCAmelCase : Optional[int] = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowerCAmelCase : List[Any] = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowerCAmelCase : Dict = replace_key_with_offset(_snake_case , _snake_case , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowerCAmelCase : int = replace_key_with_offset(_snake_case , _snake_case , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowerCAmelCase : Optional[int] = replace_key_with_offset(_snake_case , _snake_case , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowerCAmelCase : int = replace_key_with_offset(_snake_case , _snake_case , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowerCAmelCase : Any = replace_key_with_offset(_snake_case , _snake_case , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowerCAmelCase : List[Any] = replace_key_with_offset(_snake_case , _snake_case , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowerCAmelCase : Union[str, Any] = key.replace('''head''' , '''classifier''' )
lowerCAmelCase : Union[str, Any] = value
return new_state_dict
def _snake_case ( ):
lowerCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase : Optional[int] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
@torch.no_grad()
def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : List[str] ):
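# End-to-end conversion: build the config for the requested size variant, remap the weights, sanity-check the logits, then save.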
lowerCAmelCase : Dict = PoolFormerConfig()
# set attributes based on model_name
lowerCAmelCase : int = '''huggingface/label-files'''
lowerCAmelCase : Optional[int] = model_name[-3:]
lowerCAmelCase : Union[str, Any] = 1000
lowerCAmelCase : Any = '''imagenet-1k-id2label.json'''
lowerCAmelCase : Dict = (1, 1000)
# set config attributes
lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase : Tuple = {int(_snake_case ): v for k, v in idalabel.items()}
lowerCAmelCase : Dict = idalabel
lowerCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
if size == "s12":
lowerCAmelCase : int = [2, 2, 6, 2]
lowerCAmelCase : Tuple = [64, 128, 320, 512]
lowerCAmelCase : Optional[Any] = 4.0
lowerCAmelCase : Tuple = 0.9
elif size == "s24":
lowerCAmelCase : Dict = [4, 4, 12, 4]
lowerCAmelCase : List[str] = [64, 128, 320, 512]
lowerCAmelCase : List[Any] = 4.0
lowerCAmelCase : List[str] = 0.9
elif size == "s36":
lowerCAmelCase : Union[str, Any] = [6, 6, 18, 6]
lowerCAmelCase : Union[str, Any] = [64, 128, 320, 512]
lowerCAmelCase : Union[str, Any] = 4.0
lowerCAmelCase : int = 1E-6
lowerCAmelCase : Dict = 0.9
elif size == "m36":
lowerCAmelCase : Optional[int] = [6, 6, 18, 6]
lowerCAmelCase : Optional[Any] = [96, 192, 384, 768]
lowerCAmelCase : str = 4.0
lowerCAmelCase : List[Any] = 1E-6
lowerCAmelCase : Tuple = 0.95
elif size == "m48":
lowerCAmelCase : Any = [8, 8, 24, 8]
lowerCAmelCase : Union[str, Any] = [96, 192, 384, 768]
lowerCAmelCase : List[str] = 4.0
lowerCAmelCase : Tuple = 1E-6
lowerCAmelCase : Union[str, Any] = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
lowerCAmelCase : List[str] = PoolFormerImageProcessor(crop_pct=_snake_case )
# Prepare image
lowerCAmelCase : Dict = prepare_img()
lowerCAmelCase : int = image_processor(images=_snake_case , return_tensors='''pt''' ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
lowerCAmelCase : Tuple = torch.load(_snake_case , map_location=torch.device('''cpu''' ) )
# rename keys
lowerCAmelCase : List[str] = rename_keys(_snake_case )
# create HuggingFace model and load state dict
lowerCAmelCase : Dict = PoolFormerForImageClassification(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# Define image processor
lowerCAmelCase : Tuple = PoolFormerImageProcessor(crop_pct=_snake_case )
lowerCAmelCase : str = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowerCAmelCase : Optional[Any] = model(_snake_case )
lowerCAmelCase : int = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowerCAmelCase : Union[str, Any] = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
lowerCAmelCase : Any = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
lowerCAmelCase : Union[str, Any] = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
lowerCAmelCase : List[str] = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
lowerCAmelCase : int = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _snake_case , atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
snake_case__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 717
|
"""simple docstring"""
def _snake_case ( _snake_case : float , _snake_case : list[float] ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
lowerCAmelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) )
return round(_snake_case , ndigits=2 )
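# Minimal sanity check (hypothetical figures): at a 10% discount rate, an outlay of
# -100 followed by two inflows of 60 is worth -100 + 60/1.1 + 60/1.21, which rounds to 4.13.
assert _snake_case(0.10 , [-100.0, 60.0, 60.0] ) == 4.13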
if __name__ == "__main__":
import doctest
doctest.testmod()
| 637
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCAmelCase : str = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
lowerCAmelCase : Any = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
lowerCAmelCase : Union[str, Any] = model(input_ids.to(UpperCamelCase_ ) , labels=labels.to(UpperCamelCase_ ) ).loss
lowerCAmelCase : Union[str, Any] = -(labels.shape[-1] * loss.item())
lowerCAmelCase : Dict = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 718
|
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : list[int] , _snake_case : int ):
if len(_snake_case ) == 0:
return False
lowerCAmelCase : List[Any] = len(_snake_case ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , _snake_case )
else:
return binary_search(a_list[midpoint + 1 :] , _snake_case )
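# Quick self-checks (a sketch; the input list must already be sorted for the halving to be valid):
assert binary_search([1, 3, 5, 7] , 5 )
assert not binary_search([1, 3, 5, 7] , 4 )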
if __name__ == "__main__":
snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip()
snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')]
snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip())
snake_case__ : str = '''''' if binary_search(sequence, target) else '''not '''
print(f"""{target} was {not_str}found in {sequence}""")
| 637
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : Dict = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
snake_case__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
snake_case__ : Optional[Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def _snake_case ( _snake_case : Any ):
lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case )
lowerCAmelCase : str = TestCommand(*_snake_case )
test_command.run()
lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' )
assert os.path.exists(_snake_case )
lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case )
lowerCAmelCase : List[str] = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case )
if key == "num_bytes":
assert is_apercent_close(_snake_case , _snake_case )
elif key == "splits":
assert list(_snake_case ) == list(_snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 637
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
snake_case__ : Optional[Any] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
snake_case__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720
|
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ):
return base * power(_snake_case , (exponent - 1) ) if exponent else 1
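# Example (a sketch): power(2, 5) unwinds to 2 * 2 * 2 * 2 * 2 * 1 == 32.
assert power(2 , 5 ) == 32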
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip())
snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip())
snake_case__ : Any = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
snake_case__ : Dict = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 637
| 0
|
"""simple docstring"""
import math
def _snake_case ( _snake_case : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
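# Spot checks for the 6k +/- 1 trial division above (a minimal sketch):
assert is_prime(29 ) and not is_prime(27 )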
def _snake_case ( _snake_case : float = 0.1 ):
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : Any = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_snake_case )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : int = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ):
if attention_mask is None:
lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class snake_case_:
def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ):
lowerCAmelCase : Tuple = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Union[str, Any] = eos_token_id
lowerCAmelCase : Dict = pad_token_id
lowerCAmelCase : Optional[Any] = bos_token_id
lowerCAmelCase : List[str] = initializer_range
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ):
lowerCAmelCase : int = 2_0
lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = 2_0
lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class snake_case_( unittest.TestCase ):
__UpperCamelCase = 99
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase : List[Any] = input_ids.shape[0]
lowerCAmelCase : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data()
lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ )
lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
lowerCAmelCase : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class snake_case_( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Any = FlaxBlenderbotModelTester(self )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : List[Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCAmelCase : List[Any] = ['''Sam''']
lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.'''
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
| 637
| 0
|
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def _snake_case ( _snake_case : np.ndarray , _snake_case : float ):
# Apply the Gaussian function elementwise over the matrix.
lowerCAmelCase : str = math.sqrt(_snake_case )
lowerCAmelCase : Optional[Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int , _snake_case : int ):
lowerCAmelCase : int = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def _snake_case ( _snake_case : int , _snake_case : float ):
# Creates a Gaussian kernel of the given size.
lowerCAmelCase : Optional[Any] = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _snake_case ):
for j in range(0 , _snake_case ):
lowerCAmelCase : Tuple = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_snake_case , _snake_case )
def _snake_case ( _snake_case : np.ndarray , _snake_case : float , _snake_case : float , _snake_case : int , ):
lowerCAmelCase : Any = np.zeros(img.shape )
lowerCAmelCase : List[Any] = get_gauss_kernel(_snake_case , _snake_case )
lowerCAmelCase : int = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowerCAmelCase : Optional[Any] = get_slice(_snake_case , _snake_case , _snake_case , _snake_case )
lowerCAmelCase : str = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowerCAmelCase : Dict = vec_gaussian(_snake_case , _snake_case )
lowerCAmelCase : Tuple = np.multiply(_snake_case , _snake_case )
lowerCAmelCase : Optional[Any] = np.multiply(_snake_case , _snake_case )
lowerCAmelCase : Any = np.sum(_snake_case ) / np.sum(_snake_case )
lowerCAmelCase : List[str] = val
return imga
def _snake_case ( _snake_case : list ):
lowerCAmelCase : List[Any] = args[1] if args[1:] else '''../image_data/lena.jpg'''
lowerCAmelCase : List[str] = float(args[2] ) if args[2:] else 1.0
lowerCAmelCase : Dict = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowerCAmelCase : Tuple = int(args[4] )
lowerCAmelCase : Dict = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowerCAmelCase : List[Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
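# Example invocation (hypothetical script name and values): image path, spatial variance,
# intensity variance, and an odd kernel size.
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5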
if __name__ == "__main__":
snake_case__ : List[Any] = parse_args(sys.argv)
snake_case__ : List[str] = cva.imread(filename, 0)
cva.imshow('''input image''', img)
snake_case__ : Tuple = img / 255
snake_case__ : Optional[Any] = out.astype('''float32''')
snake_case__ : Tuple = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
snake_case__ : int = out * 255
snake_case__ : Optional[Any] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 700
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ : int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _snake_case ( _snake_case : list[list[int]] ):
lowerCAmelCase : Union[str, Any] = []
for i in range(len(_snake_case ) ):
lowerCAmelCase : Any = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCAmelCase : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_snake_case ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_snake_case ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCAmelCase : str = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_snake_case )
return next_generation
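# Quick self-check (a sketch; assumes the blinker grid above is bound to BLINKER):
# one generation flips the vertical blinker to horizontal.
assert new_generation(BLINKER ) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]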
def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ):
lowerCAmelCase : int = []
for _ in range(_snake_case ):
# Create output image
lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) )
lowerCAmelCase : Union[str, Any] = img.load()
# Save cells to image
for x in range(len(_snake_case ) ):
for y in range(len(cells[0] ) ):
lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255
lowerCAmelCase : List[Any] = (colour, colour, colour)
# Save image
images.append(_snake_case )
lowerCAmelCase : Union[str, Any] = new_generation(_snake_case )
return images
if __name__ == "__main__":
snake_case__ : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 637
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
snake_case__ : str = None
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Any = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : List[Any] = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
snake_case__ : Union[str, Any] = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = BarthezTokenizer
def __init__( self : Optional[int] , UpperCamelCase_ : str=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : int="</s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Any="<mask>" , **UpperCamelCase_ : int , ):
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Tuple = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
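# Resulting layout (a sketch of the RoBERTa-style scheme that BARThez follows):
# single sequence: <s> A </s> ; pair of sequences: <s> A </s></s> B </s>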
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 701
|
"""simple docstring"""
from __future__ import annotations
class snake_case_:
def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ):
lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self : Dict ):
# Searches for the pattern in the text and returns the index positions of all matches
lowerCAmelCase : Union[str, Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ )
if mismatch_index == -1:
positions.append(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
lowerCAmelCase : int = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
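# Note (a sketch): for the demo below, "AB" occurs in "ABAABA" at indices 0 and 3, so
# bad_character_heuristic() returns [0, 3]. The computed shift never actually skips
# positions, because reassigning the loop variable inside a Python for loop has no
# effect on the iteration, which is why the lgtm tag above suppresses the warning.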
snake_case__ : str = '''ABAABA'''
snake_case__ : List[str] = '''AB'''
snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern)
snake_case__ : Optional[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 637
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : List[Any] = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
snake_case__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 702
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_( a__ ):
pass
class snake_case_:
def __init__( self : Any , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = data
lowerCAmelCase : Node | None = None
def __iter__( self : int ):
lowerCAmelCase : Any = self
lowerCAmelCase : Union[str, Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase_ )
yield node.data
lowerCAmelCase : Optional[int] = node.next_node
@property
def lowerCamelCase__ ( self : str ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
snake_case__ : Dict = Node(1)
snake_case__ : Any = Node(2)
snake_case__ : int = Node(3)
snake_case__ : Any = Node(4)
print(root_node.has_loop) # False
snake_case__ : Tuple = root_node.next_node
print(root_node.has_loop) # True
snake_case__ : List[Any] = Node(5)
snake_case__ : int = Node(6)
snake_case__ : List[Any] = Node(5)
snake_case__ : Dict = Node(6)
print(root_node.has_loop) # False
snake_case__ : Any = Node(1)
print(root_node.has_loop) # False
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase : int = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCAmelCase : Optional[int] = tf_top_k_top_p_filtering(UpperCamelCase_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase : Union[str, Any] = output[output != -float('''inf''' )]
lowerCAmelCase : Union[str, Any] = tf.cast(
tf.where(tf.not_equal(UpperCamelCase_ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-12 )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
@require_tf
class snake_case_( unittest.TestCase , a__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__UpperCamelCase = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCamelCase__ ( self : Any ):
# TF-only test: tf.saved_model export
lowerCAmelCase : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Any = 2
class snake_case_( tf.Module ):
def __init__( self : Any , UpperCamelCase_ : Optional[Any] ):
super(UpperCamelCase_ , self ).__init__()
lowerCAmelCase : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase_ , )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : List[Any] = self.model.generate(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase : Tuple = [[2, 0], [1_0_2, 1_0_3]]
lowerCAmelCase : Dict = [[1, 0], [1, 1]]
lowerCAmelCase : str = DummyModel(model=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase : Union[str, Any] = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default''']
for batch_size in range(1 , len(UpperCamelCase_ ) + 1 ):
lowerCAmelCase : List[str] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase : Dict = serving_func(**UpperCamelCase_ )['''sequences''']
lowerCAmelCase : Tuple = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Any ):
# TF-only test: tf.saved_model export
lowerCAmelCase : str = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : Dict = 2
class snake_case_( tf.Module ):
def __init__( self : Dict , UpperCamelCase_ : str ):
super(UpperCamelCase_ , self ).__init__()
lowerCAmelCase : int = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : List[Any] = self.model.generate(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase : Optional[int] = [[2], [1_0_2, 1_0_3]]
lowerCAmelCase : Tuple = [[1], [1, 1]]
lowerCAmelCase : Optional[int] = DummyModel(model=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase : List[Any] = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default''']
for input_row in range(len(UpperCamelCase_ ) ):
lowerCAmelCase : Optional[Any] = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase : Union[str, Any] = serving_func(**UpperCamelCase_ )['''sequences''']
lowerCAmelCase : Optional[Any] = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
@slow
@require_tensorflow_text
def lowerCamelCase__ ( self : int ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCamelCase_ )
class snake_case_( tf.keras.layers.Layer ):
def __init__( self : Any ):
super().__init__()
lowerCAmelCase : Union[str, Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , '''spiece.model''' ) , '''rb''' ).read() )
lowerCAmelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Dict , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ):
lowerCAmelCase : str = self.tokenizer.tokenize(UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase : Dict = text.pad_model_inputs(
UpperCamelCase_ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
lowerCAmelCase : Dict = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
return self.tokenizer.detokenize(UpperCamelCase_ )
lowerCAmelCase : Dict = CompleteSentenceTransformer()
lowerCAmelCase : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowerCAmelCase : Dict = complete_model(UpperCamelCase_ )
lowerCAmelCase : int = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_ )
keras_model.save(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase : Dict = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
lowerCAmelCase : int = 1_4
lowerCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Union[str, Any] = '''Hello, my dog is cute and'''
lowerCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , return_tensors='''tf''' )
lowerCAmelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Union[str, Any] = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCAmelCase : Any = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase : int = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCAmelCase : str = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase__ ( self : Dict ):
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase : Dict = '''Hugging Face is a technology company based in New York and Paris.'''
lowerCAmelCase : Tuple = bart_tokenizer(UpperCamelCase_ , return_tensors='''tf''' ).input_ids
lowerCAmelCase : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase : Optional[int] = bart_model.generate(UpperCamelCase_ ).numpy()
class snake_case_( a__ ):
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Dict ):
return super().call(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Any = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase : Tuple = bart_model.generate(UpperCamelCase_ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_ ) )
class snake_case_( bart_model.model.encoder.__class__ ):
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : int ):
return super().call(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : str = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCAmelCase : Union[str, Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase : Optional[Any] = bart_model.generate(UpperCamelCase_ ).numpy()
with self.assertRaises(UpperCamelCase_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCamelCase_ , foo='''bar''' )
| 703
|
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
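# Illustrative usage sketch (not part of the original file); the sizes below
# are made up for demonstration.
if __name__ == "__main__":
    import torch
    head = ClassificationHead(class_size=2, embed_size=768)
    hidden_state = torch.randn(4, 768)  # a batch of 4 pooled hidden states
    print(head(hidden_state).shape)  # torch.Size([4, 2])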
| 637
| 0
|
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = '''audio'''
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='''audio''', label_column='''label''')
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 704
|
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root: Node, res: list):
    # Recursive in-order traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr: list) -> list:
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res: list = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
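# Added note: Node.insert ignores a key equal to an existing node's value, so
# duplicates are silently dropped, e.g. (illustrative):
#   >>> tree_sort([3, 1, 3, 2])
#   [1, 2, 3]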
| 637
| 0
|
"""simple docstring"""
def kth_permutation(k: int, n: int) -> list:
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
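# Illustrative examples (assuming the reconstruction above): k is 0-indexed and
# permutations of range(n) are enumerated in lexicographic order.
#   >>> kth_permutation(0, 4)
#   [0, 1, 2, 3]
#   >>> kth_permutation(23, 4)  # 4! - 1, the last permutation
#   [3, 2, 1, 0]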
| 705
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = '''levit'''
    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
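# Minimal instantiation sketch (illustrative, not part of the original file):
#   >>> config = LevitConfig()        # LeViT-128S style defaults
#   >>> config.hidden_sizes
#   [128, 256, 384]
#   >>> config.num_attention_heads
#   [4, 8, 12]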
| 637
| 0
|
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
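# Quick sanity check (illustrative): the Taylor approximation tracks math.sin.
#   >>> from math import sin as stdlib_sin
#   >>> abs(sin(30.0) - stdlib_sin(radians(30.0))) < 1e-9
#   True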
| 706
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ] )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
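# Illustrative: in generation loops these criteria are typically combined, and
# generation stops as soon as any criterion in the list returns True, e.g.
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=1.0)])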
| 637
| 0
|
"""simple docstring"""
__author__ = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
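# Note: the driver above uses the well-known multiplier 1664525 and increment
# 1013904223 with modulo 2**32 (written 2 << 31). A fixed seed makes the stream
# reproducible (illustrative):
#   >>> LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=0).next_number()
#   1013904223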
| 707
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''')
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1E-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True, ):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                ''' for the UnCLIPScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    ''' for the UnCLIPScheduler.''' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
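# Minimal sketch (illustrative, not part of the original module): the cosine
# schedule above yields betas clipped at max_beta, e.g.
#   >>> betas = betas_for_alpha_bar(10)
#   >>> betas.shape
#   torch.Size([10])
#   >>> bool(betas.max() <= 0.999)
#   True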
| 637
| 0
|
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
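# Cross-check (illustrative): the n-th entry is n * (2n - 1), so
#   >>> hexagonal_numbers(5)
#   [0, 1, 6, 15, 28]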
| 708
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': LlamaModel,
            '''text-classification''': LlamaForSequenceClassification,
            '''text-generation''': LlamaForCausalLM,
            '''zero-shot''': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class snake_case_( unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase : int = '''Simply put, the theory of relativity states that '''
lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 637
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
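# Illustrative instantiation (not part of the original file): with the default
# depths the derived hidden_size is embed_dim * 2 ** (len(depths) - 1) = 96 * 8.
#   >>> config = Swinv2Config()
#   >>> config.hidden_size
#   768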
| 709
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, '''schedule.bin''')
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {'''num_warmup_steps''': 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, '''num_cycles''': 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {'''num_warmup_steps''': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=F'''failed for {scheduler_func} in normal scheduler''', )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(self, scheduler):
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        '''from __main__ import arr, next_greatest_element_slow, '''
        '''next_greatest_element_fast, next_greatest_element'''
    )
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
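# Quick check (illustrative): all three implementations agree with `expect`.
#   >>> next_greatest_element_slow(arr) == expect == next_greatest_element(arr)
#   True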
| 710
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = '''philschmid/bart-large-cnn-samsum'''
    description = (
        '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
        '''and returns a summary of the text.'''
    )
    name = '''summarizer'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ['''text''']
    outputs = ['''text''']
    def encode(self, text):
        return self.pre_processor(text, return_tensors='''pt''', truncation=True)
    def forward(self, inputs):
        return self.model.generate(**inputs)[0]
    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
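# Usage sketch (illustrative; downloads the checkpoint on first use):
#   tool = TextSummarizationTool()
#   print(tool("A long English conversation or document to boil down to a few sentences."))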
| 637
| 0
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
    print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
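# Note: the recursion above only terminates for exponent >= 0, which is why the
# driver calls power(base, abs(exponent)) and inverts afterwards, e.g.
# 2 ** -3 == 1 / power(2, 3) == 0.125.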
| 711
|
"""simple docstring"""
__author__ = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
| 637
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
SPIECE_UNDERLINE = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
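# A short usage sketch (an assumption for illustration: it presumes network
# access to the "google/bigbird-roberta-base" checkpoint listed above; the
# input sentence is arbitrary). Calling the tokenizer exercises
# build_inputs_with_special_tokens, which frames a single sequence as
# [CLS] ... [SEP]:
if __name__ == "__main__":
    tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    encoded = tokenizer("Block-sparse attention handles long sequences.")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))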
| 637
| 0
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Dump the current git commit info to `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(init_method="env://", backend="nccl")
def set_seed(args):
    """Seed numpy and torch (and all CUDA devices when GPUs are in use)."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
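# A minimal usage sketch. The SimpleNamespace below is a hypothetical
# stand-in for the argparse namespace the distillation scripts pass in; with
# n_gpu=0 the CPU-only branch of init_gpu_params runs, so no CUDA device or
# launcher environment variables (WORLD_SIZE, RANK, ...) are required.
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(seed=56, n_gpu=0, local_rank=0)
    set_seed(args)
    init_gpu_params(args)
    assert args.is_master and not args.multi_gpu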
| 713
|
"""simple docstring"""
# Use DFS to build an Eulerian path traversal.
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the undirected edge u-v as used in both directions
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
# Check whether the graph has an Euler circuit (return code 1), an Euler
# path (code 2), or neither (code 3), along with the last odd-degree node.
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # an Euler path must start at one of the two odd-degree vertices
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],
    }  # every vertex has degree zero
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
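# Worked example, traced by hand against the reconstruction above: g1 has
# exactly two odd-degree vertices (1 has degree 3, 5 has degree 1), so
# check_circuit_or_path returns (2, 5), dfs starts from node 5, and the
# first check_euler call prints:
#     graph has a Euler path
#     [5, 4, 1, 2, 3, 1]
# which uses each of g1's five edges (4-5, 1-4, 1-2, 2-3, 1-3) exactly once.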
| 637
| 0
|