| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
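# A SentencePiece-backed tokenizer: the whole vocabulary lives in a single "spiece.model" file.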
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for Reformer models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePiece processor is not picklable; it is reloaded in __setstate__
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 74 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 144 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
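# ConstantLengthDataset packs the streamed corpus into fixed-length token blocks so the
# evaluation loop below measures perplexity over contiguous text.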
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # rough number of raw characters to buffer before tokenizing one batch
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
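# _import_structure maps each submodule to its public names; _LazyModule (bottom of file)
# imports them lazily on first attribute access.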
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# The original class names were stripped by the dump's obfuscation (both classes were named
# "A"); the placeholder names below are assumptions, but the DummyObject pattern is standard.
class SpeechFeatureExtractorDummy(metaclass=DummyObject):
    _backends = ["speech"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
class SpeechProcessorDummy(metaclass=DummyObject):
    _backends = ["speech"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 3 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Vector from end_point1 to end_point2 (the obfuscated original subtracted a point from itself)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    # a cross product that is ~0 in every component means the spanning vectors are parallel
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
| 136 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = "3.0.12"
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None
    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""
    def __init__(self, lock):
        self.lock = lock
        return None
    def __enter__(self):
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the common lock-counter and timeout logic shared by all lock flavors."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file
    @property
    def timeout(self):
        return self._timeout
    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None
    def _acquire(self):
        raise NotImplementedError()
    def _release(self):
        raise NotImplementedError()
    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None
    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard-lock the lock file on Windows systems."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard-lock the lock file on Unix systems."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 365 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages."""
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable, per-example set of languages."""
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 68 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
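# The check below 2-colors the graph with DFS: the graph is bipartite iff no edge ends up
# joining two vertices of the same color.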
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 5 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)
        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TFTokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # pad_token_id must be set for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 355 | """simple docstring"""
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 126 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    """Dense matrix with Sherman-Morrison rank-one inverse update."""
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2() -> None:
        import doctest
        doctest.testmod()
    test1()
| 54 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
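# Tokenizer, PyTorch, TensorFlow and Flax components are registered below only when the
# matching backend is installed; the try/except blocks skip them otherwise.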
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
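# Applying these substitutions in order maps TF variable names onto the Bart-style
# PyTorch state dict keys.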
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 351 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
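# The feature extractor below converts raw mono audio into the log-mel spectrogram features
# Whisper consumes (80 mel bins by default).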
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 218 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
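# Only the configuration is importable unconditionally; the PyTorch model classes are
# added to the import structure when torch is present.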
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
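# T is the vertex type; the priority queue and graph below are generic over it. The queue
# supports decrease-key via a position map, which Prim's algorithm relies on.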
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Array-backed binary min-heap keyed by weight."""
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Adjacency-map representation of an undirected weighted graph."""
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__(self) -> str:
        return str(self.connections)
    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
return dist, parent
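
if __name__ == "__main__":
    # A small usage sketch of the classes above (node labels are arbitrary):
    # build a weighted graph, then read the spanning tree out of `parent`.
    graph = GraphUndirectedWeighted()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "d", 5)
    graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(graph)
    print(parent)  # predecessor of each node in the tree; the root maps to None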
| 111 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

MAX_MODEL_INPUT_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes at the character level
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
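
if __name__ == "__main__":
    # A round-trip sketch with a toy vocabulary (the vocab contents are
    # illustrative; assumes a transformers version compatible with this class
    # layout, and uses the internal `_tokenize`/`_convert_token_to_id` hooks).
    import tempfile

    toy_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(toy_vocab, f)

    tokenizer = MgpstrTokenizer(vocab_file=f.name)
    tokens = tokenizer._tokenize("abc")  # one token per character: ['a', 'b', 'c']
    print(tokens, [tokenizer._convert_token_to_id(t) for t in tokens])  # -> [2, 3, 4]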
| 154 | """simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 154 | 1 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """
    Solve y' = f(x, y) with initial condition y(x0) = y0 using the classic
    fourth-order Runge-Kutta method with step size h, up to x_end.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
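    # Worked check (assumes `runge_kutta` as fixed above): integrating y' = y
    # with y(0) = 1 over [0, 1] should approximate e at x = 1.
    approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approx[-1])  # ~2.71828, close to np.e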
| 57 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load the Lightning checkpoint on top of a base Longformer model
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 57 | 1 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
    if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)

| 96 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2

| 96 | 1 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place by sweeping alternately from the end to the start
    and back, swapping out-of-order neighbours, until a pass makes no swap.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 30 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity of two strings: the Jaro score plus a bonus for a
    shared prefix of up to four characters.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
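    # Worked check (the classic Winkler example): "martha" vs "marhta" has six
    # matching characters, one transposition and a 3-character common prefix,
    # so jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444 and the prefix bonus lifts it.
    print(f"{jaro_winkler('martha', 'marhta'):.4f}")  # 0.9611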
| 258 | 0 |
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

| 363 |

from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=self.is_encoder_decoder,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
@property
def UpperCamelCase ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCamelCase ( self ):
_snake_case : List[Any] = "facebook/opt-125m"
_snake_case : int = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_snake_case : str = []
_snake_case : Any = GPTaTokenizer.from_pretrained(lowercase_ )
_snake_case : List[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
_snake_case : str = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
_snake_case : Any = model.generate(lowercase_ , max_length=10 )
_snake_case : List[str] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Optional[int] = "facebook/opt-350m"
_snake_case : Dict = GPTaTokenizer.from_pretrained(lowercase_ )
_snake_case : Dict = TFOPTForCausalLM.from_pretrained(lowercase_ )
_snake_case : int = "left"
# use different length sentences to test batching
_snake_case : Union[str, Any] = [
"Hello, my dog is a little",
"Today, I",
]
_snake_case : Optional[Any] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ )
_snake_case : List[Any] = inputs["input_ids"]
_snake_case : Union[str, Any] = model.generate(input_ids=lowercase_ , attention_mask=inputs["attention_mask"] )
_snake_case : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_snake_case : List[str] = model.generate(input_ids=lowercase_ )
_snake_case : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.int64 ) )
_snake_case : int = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_snake_case : int = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
_snake_case : Tuple = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
_snake_case : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
_snake_case : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
_snake_case : Optional[Any] = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def UpperCamelCase ( self ):
_snake_case : Tuple = "facebook/opt-350m"
_snake_case : Optional[int] = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_snake_case : str = []
_snake_case : str = GPTaTokenizer.from_pretrained(lowercase_ )
_snake_case : List[str] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
_snake_case : Dict = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
_snake_case : Any = model.generate(lowercase_ , max_length=10 )
_snake_case : Tuple = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ ) | 284 | 0 |
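# A minimal, standalone sketch of the left-padded batch-generation pattern the tests
# above exercise. The checkpoint name and prompts come from the tests; everything else
# is ordinary `transformers` usage shown for illustration, not the suite's literal code.
from transformers import GPT2Tokenizer, TFOPTForCausalLM
tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
tokenizer.padding_side = "left"  # decoder-only models need left padding for batched generation
model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))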
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self : Union[str, Any] , *,
snake_case_ : int = 4 , snake_case_ : int = 7_6_8 , snake_case_ : int , snake_case_ : List[Any] , ):
super().__init__()
_UpperCAmelCase = nn.Parameter(torch.zeros(snake_case_ ) )
# parameters for additional clip time embeddings
_UpperCAmelCase = nn.Linear(snake_case_ , snake_case_ )
_UpperCAmelCase = nn.Linear(snake_case_ , snake_case_ )
# parameters for encoder hidden states
_UpperCAmelCase = clip_extra_context_tokens
_UpperCAmelCase = nn.Linear(
snake_case_ , self.clip_extra_context_tokens * cross_attention_dim )
_UpperCAmelCase = nn.Linear(snake_case_ , snake_case_ )
_UpperCAmelCase = nn.LayerNorm(snake_case_ )
def lowercase ( self : Union[str, Any] , *, snake_case_ : Dict , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Tuple ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_UpperCAmelCase = image_embeddings.shape[0]
_UpperCAmelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_UpperCAmelCase = classifier_free_guidance_embeddings.expand(
snake_case_ , -1 )
_UpperCAmelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_UpperCAmelCase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_UpperCAmelCase = self.embedding_proj(snake_case_ )
_UpperCAmelCase = self.clip_image_embeddings_project_to_time_embeddings(snake_case_ )
_UpperCAmelCase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_UpperCAmelCase = self.clip_extra_context_tokens_proj(snake_case_ )
_UpperCAmelCase = clip_extra_context_tokens.reshape(snake_case_ , -1 , self.clip_extra_context_tokens )
_UpperCAmelCase = clip_extra_context_tokens.permute(0 , 2 , 1 )
_UpperCAmelCase = self.encoder_hidden_states_proj(snake_case_ )
_UpperCAmelCase = self.text_encoder_hidden_states_norm(snake_case_ )
_UpperCAmelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 22 |
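# A standalone sketch of the shape bookkeeping at the heart of the projection module
# above: CLIP image embeddings are projected into a few extra context tokens and
# concatenated with the text encoder hidden states. All dimensions below are
# illustrative assumptions, not values taken from any real model config.
import torch
from torch import nn
batch, clip_dim, cross_dim, n_extra = 2, 768, 1280, 4
proj = nn.Linear(clip_dim, n_extra * cross_dim)
image_embeds = torch.randn(batch, clip_dim)
# project, then reshape/permute exactly as in the forward pass above
extra_tokens = proj(image_embeds).reshape(batch, -1, n_extra).permute(0, 2, 1)
text_hidden_states = torch.randn(batch, 77, cross_dim)  # 77 = CLIP text sequence length
combined = torch.cat([extra_tokens, text_hidden_states], dim=1)
print(combined.shape)  # torch.Size([2, 81, 1280])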
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :int=13 , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :int=[10, 20, 30, 40] , lowerCamelCase_ :Dict=[2, 2, 3, 2] , lowerCamelCase_ :Any=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Any=37 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=10 , lowerCamelCase_ :int=0.02 , lowerCamelCase_ :Optional[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ :List[str]=[2, 3, 4] , lowerCamelCase_ :Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =parent
lowerCamelCase__ : Optional[int] =batch_size
lowerCamelCase__ : List[Any] =image_size
lowerCamelCase__ : Dict =num_channels
lowerCamelCase__ : Optional[int] =num_stages
lowerCamelCase__ : Optional[int] =hidden_sizes
lowerCamelCase__ : Optional[Any] =depths
lowerCamelCase__ : Any =is_training
lowerCamelCase__ : Optional[Any] =use_labels
lowerCamelCase__ : List[str] =intermediate_size
lowerCamelCase__ : int =hidden_act
lowerCamelCase__ : Dict =num_labels
lowerCamelCase__ : Union[str, Any] =initializer_range
lowerCamelCase__ : Dict =out_features
lowerCamelCase__ : List[str] =out_indices
lowerCamelCase__ : Any =scope
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Any =None
if self.use_labels:
lowerCamelCase__ : str =ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Tuple =self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =ConvNextVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =ConvNextVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Any =model(lowerCamelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase__ : Union[str, Any] =None
lowerCamelCase__ : Dict =ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[str] =model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs
lowerCamelCase__ : Dict ={'pixel_values': pixel_values}
return config, inputs_dict
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs
lowerCamelCase__ : Optional[int] ={'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class A_ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : int =ConvNextVaModelTester(self )
lowerCamelCase__ : Tuple =ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__ : List[str] =True
if model_class.__name__ in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]:
continue
lowerCamelCase__ : Any =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : Union[str, Any] =self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict =model(**lowerCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__ : Optional[Any] =False
lowerCamelCase__ : Optional[Any] =True
if (
model_class.__name__
in [*get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase__ : List[str] =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
lowerCamelCase__ : List[Any] =self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict =model(**lowerCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] =model_class(lowerCamelCase_ )
lowerCamelCase__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] =['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] ):
lowerCamelCase__ : Tuple =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] =model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCamelCase__ : int =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Union[str, Any] =self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str =True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str =True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[int] =ConvNextVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCAmelCase_ ( ) ->List[str]:
lowerCamelCase__ : Tuple =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : int =ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] =self.default_image_processor
lowerCamelCase__ : int =prepare_img()
lowerCamelCase__ : List[Any] =preprocessor(images=lowerCamelCase_ , return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] =model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Union[str, Any] =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowerCamelCase__ : Dict =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) ) | 126 | 0 |
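# A standalone sketch of the end-to-end inference flow the integration test above
# asserts on. The checkpoint and fixture path come from the test itself;
# `ConvNextV2ForImageClassification` is the public class name.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits  # (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])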
from math import factorial, pi
def _snake_case ( A , A = 30 ) -> float:
if not isinstance(lowerCAmelCase__ , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
lowerCAmelCase__ = float(lowerCAmelCase__ )
lowerCAmelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowerCAmelCase__ ) )
def _snake_case ( A , A = 30 ) -> float:
if not isinstance(lowerCAmelCase__ , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
lowerCAmelCase__ = float(lowerCAmelCase__ )
lowerCAmelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 360 |
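# A quick numerical sanity check of the two Maclaurin approximations above against
# the stdlib, assuming the functions are exposed as `maclaurin_sin` / `maclaurin_cos`
# (the names the __main__ block calls). The tolerance is an illustrative choice.
import math
for theta in (-10.0, -1.5, 0.0, 2.0, 10.0):
    assert math.isclose(maclaurin_sin(theta), math.sin(theta), abs_tol=1e-9)
    assert math.isclose(maclaurin_cos(theta), math.cos(theta), abs_tol=1e-9)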
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__UpperCAmelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class a__ ( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase__ : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase__ : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase__ : Tuple = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowerCAmelCase__ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
lowerCAmelCase__ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
lowerCAmelCase__ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
lowerCAmelCase__ = text_classifier('''This is great !''' , return_all_scores=lowerCamelCase_ )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowerCAmelCase__ = text_classifier('''This is great !''' , return_all_scores=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
lowerCAmelCase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
lowerCAmelCase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> int:
import torch
lowerCAmelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = pipeline('''text-classification''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = pipeline('''text-classification''' , framework='''tf''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = TextClassificationPipeline(model=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCAmelCase__ = '''HuggingFace is in'''
lowerCAmelCase__ = text_classifier(lowerCamelCase_ )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
lowerCAmelCase__ = ['''HuggingFace is in ''', '''Paris is in France''']
lowerCAmelCase__ = text_classifier(lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}, {'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowerCAmelCase__ = text_classifier(lowerCamelCase_ , top_k=lowerCamelCase_ )
lowerCAmelCase__ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [[{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] * N, [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] * N] , )
lowerCAmelCase__ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
lowerCAmelCase__ = text_classifier(lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowerCAmelCase__ = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(lowerCamelCase_ ):
text_classifier(lowerCamelCase_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCAmelCase__ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) | 228 | 0 |
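# A minimal sketch of the non-legacy way to get every label's score from a
# text-classification pipeline, which the tests above contrast with the deprecated
# `return_all_scores` flag. Uses the same tiny test checkpoint as the tests.
from transformers import pipeline
classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))              # top label only
print(classifier("This is great !", top_k=None))  # all labels with their scores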
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class __snake_case ( a ):
def __init__( self : Optional[Any] , _snake_case : int=None , _snake_case : Optional[Any]=None , *_snake_case : Any , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
if config is None:
assert isinstance(self.model , _snake_case), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
UpperCAmelCase_ = self.model.config
else:
UpperCAmelCase_ = config
UpperCAmelCase_ = data_args
UpperCAmelCase_ = self.config.tgt_vocab_size if isinstance(self.config , _snake_case) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
''' padding.''')
if self.args.label_smoothing == 0:
UpperCAmelCase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCAmelCase_ = label_smoothed_nll_loss
def lowerCamelCase ( self : Any , _snake_case : int):
"""simple docstring"""
if self.optimizer is None:
UpperCAmelCase_ = ['''bias''', '''LayerNorm.weight''']
UpperCAmelCase_ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
'''weight_decay''': 0.0,
},
]
UpperCAmelCase_ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCAmelCase_ = Adafactor
UpperCAmelCase_ = {'''scale_parameter''': False, '''relative_step''': False}
else:
UpperCAmelCase_ = AdamW
UpperCAmelCase_ = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
UpperCAmelCase_ = self.args.learning_rate
if self.sharded_ddp:
UpperCAmelCase_ = OSS(
params=_snake_case , optim=_snake_case , **_snake_case , )
else:
UpperCAmelCase_ = optimizer_cls(_snake_case , **_snake_case)
if self.lr_scheduler is None:
UpperCAmelCase_ = self._get_lr_scheduler(_snake_case)
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''')
def lowerCamelCase ( self : str , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCAmelCase_ = schedule_func(self.optimizer)
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCAmelCase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps)
else:
UpperCAmelCase_ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_snake_case)
return scheduler
def lowerCamelCase ( self : Dict):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset)
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[Any]):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCAmelCase_ = model(**_snake_case , use_cache=_snake_case)[0]
UpperCAmelCase_ = self.loss_fn(logits.view(-1 , logits.shape[-1]) , labels.view(-1))
else:
# compute usual loss via models
UpperCAmelCase_ , UpperCAmelCase_ = model(**_snake_case , labels=_snake_case , use_cache=_snake_case)[:2]
else:
# compute label smoothed loss
UpperCAmelCase_ = model(**_snake_case , use_cache=_snake_case)[0]
UpperCAmelCase_ = torch.nn.functional.log_softmax(_snake_case , dim=-1)
UpperCAmelCase_ , UpperCAmelCase_ = self.loss_fn(_snake_case , _snake_case , self.args.label_smoothing , ignore_index=self.config.pad_token_id)
return loss, logits
def lowerCamelCase ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = inputs.pop('''labels''')
UpperCAmelCase_ , UpperCAmelCase_ = self._compute_loss(_snake_case , _snake_case , _snake_case)
return loss
def lowerCamelCase ( self : List[str] , _snake_case : nn.Module , _snake_case : Dict[str, Union[torch.Tensor, Any]] , _snake_case : bool , _snake_case : Optional[List[str]] = None , ):
"""simple docstring"""
UpperCAmelCase_ = self._prepare_inputs(_snake_case)
UpperCAmelCase_ = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCAmelCase_ = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **_snake_case , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase_ = self._pad_tensors_to_max_len(_snake_case , gen_kwargs['''max_length'''])
UpperCAmelCase_ = inputs.pop('''labels''')
with torch.no_grad():
# compute loss on predict data
UpperCAmelCase_ , UpperCAmelCase_ = self._compute_loss(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCAmelCase_ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase_ = self._pad_tensors_to_max_len(_snake_case , gen_kwargs['''max_length'''])
return (loss, logits, labels)
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
F""" padded to `max_length`={max_length}""")
UpperCAmelCase_ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device)
UpperCAmelCase_ = tensor
return padded_tensor
| 51 |
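# A sketch of label-smoothed NLL loss, the quantity the trainer above computes when
# `label_smoothing > 0`. It follows the widely used fairseq-style recipe that the
# dynamic `label_smoothed_nll_loss` import refers to; treat the exact reduction
# details as an assumption rather than this file's literal helper.
import torch
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # log-prob assigned to the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # uniform-smoothing term over the vocab
    pad_mask = target.eq(ignore_index)                # ignore padding positions entirely
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss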
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 91 | 0 |
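# A stdlib-only imitation of the lazy-import pattern the `_LazyModule` block above
# relies on: nothing heavy is imported until an attribute is first touched.
# `_LazyModule` itself is internal to `transformers`; this standalone class only
# illustrates the idea and is not its actual implementation.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # import_structure maps submodule name -> list of exported attribute names
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again for this name
        return value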
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __snake_case ( _lowerCAmelCase : Dict ) -> Union[str, Any]:
A_ : List[str] = {}
A_ : int = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
A_ : Any = len(example["content"] ) / len(output["input_ids"] )
return output
_lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
_lowerCAmelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCAmelCase : List[str] = multiprocessing.cpu_count()
_lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_lowerCAmelCase : str = time.time()
_lowerCAmelCase : Dict = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : List[Any] = time.time()
_lowerCAmelCase : Union[str, Any] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : Union[str, Any] = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 368 |
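# A standalone illustration of the characters-per-token ratio that the `tokenize`
# map function above records. The "gpt2" tokenizer here is an illustrative
# assumption; the script itself loads whatever `args.tokenizer_dir` points at.
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("gpt2")
text = "def add(a, b):\n    return a + b\n"
input_ids = tok(text, truncation=True)["input_ids"]
print(len(text) / len(input_ids))  # higher means each token covers more source characters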
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : Union[str, Any] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE ( cls :str ):
'''simple docstring'''
A_ : Tuple = TOKEN
HfFolder.save_token(snake_case )
@classmethod
def SCREAMING_SNAKE_CASE ( cls :List[str] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
A_ : Optional[Any] = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case , getattr(snake_case , snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case , repo_id="test-config" , push_to_hub=snake_case , use_auth_token=self._token )
A_ : Any = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case , getattr(snake_case , snake_case ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
A_ : Optional[Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case , getattr(snake_case , snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case , repo_id="valid_org/test-config-org" , push_to_hub=snake_case , use_auth_token=self._token )
A_ : Dict = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case , getattr(snake_case , snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
A_ : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
A_ : Optional[int] = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=snake_case )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
A_ : Any = c.n_embd + 1 # int
A_ : List[Any] = c.resid_pdrop + 1.0 # float
A_ : Optional[int] = not c.scale_attn_weights # bool
A_ : str = c.summary_type + "foo" # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(snake_case , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case , c.summary_type , "mismatch for key: summary_type" )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Optional[int] = PretrainedConfig()
A_ : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
snake_case , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
A_ : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case , snake_case )]
if len(snake_case ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f" {', '.join(snake_case )}." )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
with self.assertRaises(snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
A_ : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : int = mock.Mock()
A_ : Tuple = 500
A_ : str = {}
A_ : Dict = HTTPError
A_ : Dict = {}
# Download this model to make sure it's in the cache.
A_ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case ) as mock_head:
A_ : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : List[str] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Tuple = AutoConfig.from_pretrained("bert-base-cased" )
A_ : Union[str, Any] = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case )
A_ : Union[str, Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
A_ : Dict = AutoConfig.from_pretrained(snake_case )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
A_ : Optional[Any] = ["config.42.0.0.json"]
A_ : Optional[int] = 768
configuration.save_pretrained(snake_case )
shutil.move(os.path.join(snake_case , "config.4.0.0.json" ) , os.path.join(snake_case , "config.42.0.0.json" ) )
A_ : Dict = AutoConfig.from_pretrained(snake_case )
self.assertEqual(new_configuration.hidden_size , 768 )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
A_ : Optional[int] = "v4.0.0"
A_ , A_ : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case , return_unused_kwargs=snake_case )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
A_ : Union[str, Any] = "v3.0.0"
A_ : Dict = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case )
self.assertEqual(old_configuration.hidden_size , 768 )
| 70 | 0 |
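# A minimal sketch of the save/reload round trip that the hub tests above assert
# field-by-field, reduced to a local temp directory. Keys that legitimately change
# across a reload are skipped, mirroring the tests' own exclusions.
import tempfile
from transformers import BertConfig
config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = BertConfig.from_pretrained(tmp_dir)
for key, value in config.to_dict().items():
    if key not in ("transformers_version", "_name_or_path"):
        assert getattr(reloaded, key) == value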
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A: List[str] = logging.get_logger(__name__)
A: Tuple = "▁"
A: Tuple = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
A: Union[str, Any] = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
A: Optional[Any] = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
A: Dict = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = ['input_ids', 'attention_mask']
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : Any = []
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="m2m100" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=8 , **_SCREAMING_SNAKE_CASE , ) -> None:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase : List[Any] = language_codes
UpperCAmelCase : int = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase : Optional[Any] = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
UpperCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(_SCREAMING_SNAKE_CASE )
for lang_code in fairseq_language_code
if self.get_lang_token(_SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , language_codes=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : List[str] = vocab_file
UpperCAmelCase : Dict = load_json(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : List[Any] = spm_file
UpperCAmelCase : Union[str, Any] = load_spm(_SCREAMING_SNAKE_CASE , self.sp_model_kwargs )
UpperCAmelCase : List[Any] = len(self.encoder )
UpperCAmelCase : int = {
self.get_lang_token(_SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(_SCREAMING_SNAKE_CASE )
}
UpperCAmelCase : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_SCREAMING_SNAKE_CASE )}
UpperCAmelCase : List[str] = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase : Optional[Any] = src_lang if src_lang is not None else """en"""
UpperCAmelCase : Optional[int] = tgt_lang
UpperCAmelCase : Optional[int] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCAmelCase : Dict = num_madeup_words
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = []
UpperCAmelCase : Dict = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCAmelCase : List[str] = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = [1] * len(self.prefix_tokens )
UpperCAmelCase : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Dict = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : str = self.__dict__.copy()
UpperCAmelCase : Optional[Any] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : str = {}
UpperCAmelCase : Tuple = load_spm(self.spm_file , self.sp_model_kwargs )
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    '''simple docstring'''
    save_dir = Path(save_directory )
    if not save_dir.is_dir():
        raise OSError(F"{save_directory} should be a directory" )
    vocab_save_path = save_dir / (
        (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
    )
    spm_save_path = save_dir / (
        (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
    )
    save_json(self.encoder , vocab_save_path )
    if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
        copyfile(self.spm_file , spm_save_path )
    elif not os.path.isfile(self.spm_file ):
        with open(spm_save_path , "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (str(vocab_save_path ), str(spm_save_path ))
def prepare_seq2seq_batch( self , src_texts , src_lang="en" , tgt_texts=None , tgt_lang="ro" , **kwargs ) -> BatchEncoding:
    '''simple docstring'''
    self.src_lang = src_lang
    self.tgt_lang = tgt_lang
    self.set_src_lang_special_tokens(self.src_lang )
    return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ):
    '''Used by the translation pipeline to prepare inputs for the generate function.'''
    if src_lang is None or tgt_lang is None:
        raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
    self.src_lang = src_lang
    inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
    tgt_lang_id = self.get_lang_id(tgt_lang )
    inputs["forced_bos_token_id"] = tgt_lang_id
    return inputs
def _switch_to_input_mode( self ):
    '''simple docstring'''
    self.set_src_lang_special_tokens(self.src_lang )
def _switch_to_target_mode( self ):
    '''simple docstring'''
    self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens( self , src_lang ) -> None:
    '''simple docstring'''
    lang_token = self.get_lang_token(src_lang )
    self.cur_lang_id = self.lang_token_to_id[lang_token]
    self.prefix_tokens = [self.cur_lang_id]
    self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
    '''simple docstring'''
    lang_token = self.get_lang_token(tgt_lang )
    self.cur_lang_id = self.lang_token_to_id[lang_token]
    self.prefix_tokens = [self.cur_lang_id]
    self.suffix_tokens = [self.eos_token_id]
def get_lang_token( self , lang ) -> str:
    '''simple docstring'''
    return self.lang_code_to_token[lang]
def get_lang_id( self , lang ) -> int:
    '''simple docstring'''
    lang_token = self.get_lang_token(lang )
    return self.lang_token_to_id[lang_token]
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data , path : str ) -> None:
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
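# Editor's usage sketch (an addition, not in the original file): it assumes the
# class above matches the public M2M100Tokenizer API shipped in transformers.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
    enc = tok("Hello world")
    # input_ids start with the __en__ language code configured via src_lang
    print(tok.convert_ids_to_tokens(enc["input_ids"])[:2])
    # the target language id is used as the forced BOS token when generating
    print(tok.get_lang_id("ro"))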
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX-Japanese model."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
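# Editor's sketch (an addition): instantiating the config above. The MLP width
# is derived, not stored -- hidden_size * intermediate_multiple_size is an
# assumption about how the modeling code uses this field.
if __name__ == "__main__":
    config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
    print(config.vocab_size)  # 32000 (default)
    print(config.hidden_size * config.intermediate_multiple_size)  # 2048, the implied MLP width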
| 68 | 0 |
def optimal_merge_pattern(files):
    '''
    Greedily merge the two cheapest files at every step and accumulate the
    merge costs (the classic optimal merge pattern).

    >>> optimal_merge_pattern([2, 3, 4])
    14
    '''
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
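# Editor's aside (not in the original): the loop above rescans the list with
# index()/pop(), making it O(n^2); a binary heap performs the same greedy
# merges in O(n log n).
import heapq


def optimal_merge_pattern_heap(files):
    '''
    >>> optimal_merge_pattern_heap([2, 3, 4])
    14
    '''
    heap = list(files)
    heapq.heapify(heap)
    total_cost = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)  # merge the two cheapest files
        total_cost += merged
        heapq.heappush(heap, merged)
    return total_cost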
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
def __init__( self , snake_case , snake_case=99 , snake_case=13 , snake_case=7 , snake_case=9 , snake_case=True , snake_case=True , snake_case=False , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case=8 , snake_case=0.1 , snake_case=0.0_02 , snake_case=1 , snake_case=0 , snake_case=0 , snake_case=None , snake_case=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = encoder_seq_length
snake_case_ = decoder_seq_length
# For common tests
snake_case_ = self.decoder_seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = d_ff
snake_case_ = relative_attention_num_buckets
snake_case_ = dropout_rate
snake_case_ = initializer_factor
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = decoder_start_token_id
snake_case_ = None
snake_case_ = decoder_layers
def a ( self ):
return TaConfig.from_pretrained('google/umt5-base' )
def a ( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
if attention_mask is None:
snake_case_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case )
if decoder_head_mask is None:
snake_case_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case )
if cross_attn_head_mask is None:
snake_case_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in the past input
snake_case_ = input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = self.get_config()
snake_case_ = config.num_attention_heads
snake_case_ = self.prepare_inputs_dict(snake_case , snake_case , snake_case )
return config, input_dict
def a ( self ):
snake_case_ , snake_case_ = self.prepare_config_and_inputs()
return config, inputs_dict
def a ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(
input_ids=snake_case , decoder_input_ids=snake_case , attention_mask=snake_case , decoder_attention_mask=snake_case , )
snake_case_ = model(input_ids=snake_case , decoder_input_ids=snake_case )
snake_case_ = result.last_hidden_state
snake_case_ = result.past_key_values
snake_case_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
# first forward pass
snake_case_ = model(snake_case , use_cache=snake_case )
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , use_cache=snake_case )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) + 1 )
snake_case_ , snake_case_ = outputs.to_tuple()
# create a hypothetical next token and extend it to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new token to input_ids
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = model(snake_case )['last_hidden_state']
snake_case_ = model(snake_case , past_key_values=snake_case )['last_hidden_state']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def a ( self , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case ).to(snake_case ).half().eval()
snake_case_ = model(**snake_case )['last_hidden_state']
self.parent.assertFalse(torch.isnan(snake_case ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[int] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Any = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__SCREAMING_SNAKE_CASE : List[str] = [0.8, 0.9]
def a ( self ):
snake_case_ = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = UMTaModel(config_and_inputs[0] ).to(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , "Can't do half precision" )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*snake_case )
def a ( self ):
snake_case_ = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = config_and_inputs[0]
snake_case_ = UMTaForConditionalGeneration(snake_case ).eval()
model.to(snake_case )
snake_case_ = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=snake_case ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
}
for attn_name, (name, mask) in zip(snake_case , head_masking.items() ):
snake_case_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=snake_case )
snake_case_ = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=snake_case , return_dict_in_generate=snake_case , **snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def a ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def a ( self ):
snake_case_ = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=snake_case ).to(snake_case )
snake_case_ = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=snake_case , legacy=snake_case )
snake_case_ = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
snake_case_ = tokenizer(snake_case , return_tensors='pt' , padding=snake_case ).input_ids
# fmt: off
snake_case_ = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(snake_case , snake_case )
snake_case_ = model.generate(input_ids.to(snake_case ) )
snake_case_ = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
snake_case_ = tokenizer.batch_decode(snake_case )
self.assertEqual(snake_case , snake_case )
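# Editor's sketch (an addition) of the cache-equivalence idea the tests above
# exercise, written against GPT-2 for brevity -- the choice of GPT2LMHeadModel
# is an assumption; any HF decoder with use_cache behaves the same way.
import torch
from transformers import GPT2Config, GPT2LMHeadModel

model = GPT2LMHeadModel(GPT2Config(n_layer=2, n_head=2, n_embd=32, vocab_size=100)).eval()
input_ids = torch.randint(0, 100, (1, 5))
with torch.no_grad():
    full = model(input_ids).logits[:, -1]
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    incremental = model(input_ids[:, -1:], past_key_values=past).logits[:, -1]
print(torch.allclose(full, incremental, atol=1e-5))  # True: cached and full passes agree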
| 200 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
A_ : Tuple = LongformerTokenizer
A_ : Any = True
A_ : Tuple = LongformerTokenizerFast
A_ : Dict = True
def a (self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a__ ) )
def a (self : Dict , **a__ : Optional[int] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def a (self : Any , **a__ : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def a (self : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = '''lower newer'''
__snake_case = '''lower newer'''
return input_text, output_text
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = '''lower newer'''
__snake_case = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__snake_case = tokenizer.tokenize(a__ ) # , add_prefix_space=True)
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a__ ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=a__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def a (self : Any ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=a__ )
__snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a__ )
__snake_case = tokenizer.encode(
'''sequence builders''' , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a__ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
__snake_case = '''Encode this sequence.'''
__snake_case = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a__ , a__ )
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a__ , a__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a__ , a__ )
# Testing spaces after special tokens
__snake_case = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(a__ , lstrip=a__ , rstrip=a__ )} ) # mask token has a left space
__snake_case = tokenizer.convert_tokens_to_ids(a__ )
__snake_case = '''Encode <mask> sequence'''
__snake_case = '''Encode <mask>sequence'''
__snake_case = tokenizer.encode(a__ )
__snake_case = encoded.index(a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a__ , a__ )
__snake_case = tokenizer.encode(a__ )
__snake_case = encoded.index(a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a__ , a__ )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : str ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case = self.tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case = '''A, <mask> AllenNLP sentence.'''
__snake_case = tokenizer_r.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
__snake_case = tokenizer_p.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so its average over the sequence length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
a__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
a__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def a (self : str ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__snake_case = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , a__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__snake_case = f"""{text_of_1_token} {text_of_1_token}"""
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , )
__snake_case = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ) + 1, 1 + len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
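# Editor's sketch (an addition): a minimal greedy BPE merge loop showing how
# the toy merges defined in setUp drive tokenization, independent of the
# Longformer classes above.
def apply_bpe(token, merges):
    """Greedily apply ranked merge rules to a single pre-tokenized word."""
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    symbols = list(token)
    while len(symbols) > 1:
        best_rank, i = min(
            (ranks.get(pair, float("inf")), idx)
            for idx, pair in enumerate(zip(symbols, symbols[1:]))
        )
        if best_rank == float("inf"):
            break  # no applicable merge left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols


toy_merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
print(apply_bpe("lower", toy_merges))  # ['l', 'o', 'w', 'er'] -- matches the expected tokens above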
| 24 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 126 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool ( PipelineTool ):
'''simple docstring'''
UpperCamelCase__ : Any = '''facebook/bart-large-mnli'''
UpperCamelCase__ : Dict = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
UpperCamelCase__ : Optional[int] = '''text_classifier'''
UpperCamelCase__ : Any = AutoTokenizer
UpperCamelCase__ : Union[str, Any] = AutoModelForSequenceClassification
UpperCamelCase__ : int = ['''text''', ['''text''']]
UpperCamelCase__ : Optional[Any] = ['''text''']
def _A ( self ):
'''simple docstring'''
super().setup()
__SCREAMING_SNAKE_CASE = self.model.config
__SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
__SCREAMING_SNAKE_CASE = int(_A )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(_A ) , [f"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
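# Editor's sketch (an addition): the same NLI-based zero-shot trick through the
# public pipeline API, which likewise scores one "This example is {label}"
# hypothesis per candidate label and returns the labels ranked by entailment.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("The new GPU doubles training throughput.", candidate_labels=["hardware", "cooking", "politics"])
print(result["labels"][0])  # most likely label for the input text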
| 364 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given (batch, length) shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _A , _A=7 , _A=400 , _A=2_000 , _A=1 , _A=0.0 , _A=16_000 , _A=True , _A=True , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = min_seq_length
__SCREAMING_SNAKE_CASE = max_seq_length
__SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = return_attention_mask
__SCREAMING_SNAKE_CASE = do_normalize
def _A ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _A ( self , _A=False , _A=False ):
'''simple docstring'''
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
__SCREAMING_SNAKE_CASE = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__SCREAMING_SNAKE_CASE = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__SCREAMING_SNAKE_CASE = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ASTFeatureExtractor
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ASTFeatureExtractionTester(self )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__SCREAMING_SNAKE_CASE = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
__SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
__SCREAMING_SNAKE_CASE = feat_extract(_A , padding=_A , return_tensors='np' ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(_A , padding=_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__SCREAMING_SNAKE_CASE = np.asarray(_A )
__SCREAMING_SNAKE_CASE = feat_extract(_A , return_tensors='np' ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
@require_torch
def _A ( self ):
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _load_datasamples( self , num_samples ):
    '''simple docstring'''
    from datasets import load_dataset
    ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
    # automatic decoding with librispeech
    speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
    return [x["array"] for x in speech_samples]
@require_torch
def _A ( self ):
'''simple docstring'''
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
__SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE = ASTFeatureExtractor()
__SCREAMING_SNAKE_CASE = feature_extractor(_A , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
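# Editor's sketch (an addition) of the shape contract asserted above: the AST
# extractor turns raw 16 kHz mono audio into a (1024 frames, 128 mel bins)
# log-mel spectrogram; torchaudio is assumed to be installed.
import numpy as np
from transformers import ASTFeatureExtractor

extractor = ASTFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)  # one second of fake 16 kHz audio
features = extractor(audio, sampling_rate=16_000, return_tensors="np")
print(features["input_values"].shape)  # (1, 1024, 128)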
| 118 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig ( PretrainedConfig ):
"""simple docstring"""
A__ : List[str] = '''git_vision_model'''
def __init__( self : Optional[int] , _snake_case : Dict=768 , _snake_case : List[Any]=3072 , _snake_case : int=12 , _snake_case : Union[str, Any]=12 , _snake_case : Dict=3 , _snake_case : Union[str, Any]=224 , _snake_case : Tuple=16 , _snake_case : List[Any]="quick_gelu" , _snake_case : str=1E-5 , _snake_case : str=0.0 , _snake_case : Union[str, Any]=0.02 , **_snake_case : Optional[int] , ):
super().__init__(**__snake_case )
__lowercase : Union[str, Any] = hidden_size
__lowercase : List[Any] = intermediate_size
__lowercase : List[str] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Any = num_channels
__lowercase : Optional[Any] = patch_size
__lowercase : Optional[int] = image_size
__lowercase : Any = initializer_range
__lowercase : str = attention_dropout
__lowercase : List[Any] = layer_norm_eps
__lowercase : List[str] = hidden_act
@classmethod
def snake_case_ ( cls : Optional[Any] , _snake_case : str , **_snake_case : Any ):
cls._set_token_in_kwargs(__snake_case )
__lowercase , __lowercase : Dict = cls.get_config_dict(__snake_case , **__snake_case )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
__lowercase : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__snake_case , **__snake_case )
class GitConfig ( PretrainedConfig ):
"""simple docstring"""
A__ : Union[str, Any] = '''git'''
def __init__( self : Any , _snake_case : List[str]=None , _snake_case : Optional[Any]=3_0522 , _snake_case : List[str]=768 , _snake_case : Tuple=6 , _snake_case : int=12 , _snake_case : Optional[Any]=3072 , _snake_case : Any="gelu" , _snake_case : List[str]=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=1024 , _snake_case : Union[str, Any]=0.02 , _snake_case : List[str]=1E-1_2 , _snake_case : Optional[Any]=0 , _snake_case : Union[str, Any]="absolute" , _snake_case : Any=True , _snake_case : Tuple=False , _snake_case : Union[str, Any]=101 , _snake_case : Tuple=102 , _snake_case : int=None , **_snake_case : int , ):
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , pad_token_id=__snake_case , **__snake_case )
if vision_config is None:
__lowercase : List[str] = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
__lowercase : Dict = GitVisionConfig(**__snake_case )
__lowercase : Optional[int] = vocab_size
__lowercase : List[str] = hidden_size
__lowercase : str = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Optional[int] = hidden_act
__lowercase : Optional[Any] = intermediate_size
__lowercase : List[str] = hidden_dropout_prob
__lowercase : Any = attention_probs_dropout_prob
__lowercase : Tuple = max_position_embeddings
__lowercase : Optional[Any] = initializer_range
__lowercase : List[str] = layer_norm_eps
__lowercase : Tuple = position_embedding_type
__lowercase : str = use_cache
__lowercase : Optional[int] = tie_word_embeddings
__lowercase : Union[str, Any] = num_image_with_embedding
__lowercase : int = bos_token_id
__lowercase : Union[str, Any] = eos_token_id
def snake_case_ ( self : str ):
__lowercase : Optional[int] = copy.deepcopy(self.__dict__ )
__lowercase : Any = self.vision_config.to_dict()
__lowercase : List[Any] = self.__class__.model_type
return output
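# Editor's sketch (an addition), run against the transformers-shipped GitConfig
# and GitVisionConfig, assumed to match the classes defined above; the values
# below are arbitrary toys.
from transformers import GitConfig, GitVisionConfig

vision = GitVisionConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
config = GitConfig(vision_config=vision.to_dict(), hidden_size=128, num_hidden_layers=2)
print(config.vision_config.hidden_size)  # 64 -- the nested dict is promoted back to a GitVisionConfig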
| 156 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCAmelCase : Union[str, Any] = 16
_lowerCAmelCase : List[str] = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ):
"""simple docstring"""
__a =AutoTokenizer.from_pretrained('bert-base-cased' )
__a =load_dataset('glue' , 'mrpc' )
def tokenize_function(_snake_case : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
__a =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_snake_case , max_length=_snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a =datasets.map(
_snake_case , batched=_snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_snake_case : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a =16
elif accelerator.mixed_precision != "no":
__a =8
else:
__a =None
return tokenizer.pad(
_snake_case , padding='longest' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='pt' , )
# Instantiate dataloaders.
__a =DataLoader(
tokenized_datasets['train'] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case )
__a =DataLoader(
tokenized_datasets['validation'] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : List[Any] = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _snake_case ) == "1":
__a =2
# New Code #
__a =int(args.gradient_accumulation_steps )
__a =int(args.local_sgd_steps )
# Initialize accelerator
__a =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_snake_case )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a =config['lr']
__a =int(config['num_epochs'] )
__a =int(config['seed'] )
__a =int(config['batch_size'] )
__a =evaluate.load('glue' , 'mrpc' )
set_seed(_snake_case )
__a , __a =get_dataloaders(_snake_case , _snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a =model.to(accelerator.device )
# Instantiate optimizer
__a =AdamW(params=model.parameters() , lr=_snake_case )
# Instantiate scheduler
__a =get_linear_schedule_with_warmup(
optimizer=_snake_case , num_warmup_steps=100 , num_training_steps=(len(_snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a =accelerator.prepare(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# Now we train the model
for epoch in range(_snake_case ):
model.train()
with LocalSGD(
accelerator=_snake_case , model=_snake_case , local_sgd_steps=_snake_case , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_snake_case ):
__a =model(**_snake_case )
__a =output.loss
accelerator.backward(_snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a =model(**_snake_case )
__a =outputs.logits.argmax(dim=-1 )
__a , __a =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_snake_case , references=_snake_case , )
__a =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , _snake_case )
def main( ):
"""simple docstring"""
__a =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_snake_case , default=_snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=_snake_case , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=_snake_case , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__a =parser.parse_args()
__a ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_snake_case , _snake_case )
if __name__ == "__main__":
main()
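# Editor's sketch (an addition) of plain gradient accumulation, the mechanism
# `accelerator.accumulate(...)` wraps above: sum gradients over K micro-batches,
# then take a single optimizer step.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
K = 4  # accumulation steps

for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / K  # scale so summed grads average out
    loss.backward()  # gradients accumulate in .grad across micro-batches
    if (step + 1) % K == 0:
        optimizer.step()
        optimizer.zero_grad()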
| 218 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
__lowercase = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
__lowercase = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
__lowercase = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
"""simple docstring"""
def __snake_case ( self : Tuple):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def __snake_case ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple=False):
if return_pvalue:
a : Union[str, Any] = pearsonr(__UpperCAmelCase , __UpperCAmelCase)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase)[0])}
| 226 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    '''Return the top-k fillings of the single <mask> token in `masked_input`.'''
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(values))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")  # "\u2581" is the SentencePiece word-boundary marker
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
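# Editor's sketch (an addition): the same top-k mask filling through the
# fill-mask pipeline, which wraps the tokenize/softmax/topk steps above.
from transformers import pipeline

fill = pipeline("fill-mask", model="camembert-base")
print(fill("Le camembert est <mask> :)", top_k=3))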
| 226 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : str = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig ( PretrainedConfig ):
_UpperCamelCase:Tuple = "blip_text_model"
def __init__( self , _SCREAMING_SNAKE_CASE=3_0524 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=102 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , )-> Optional[int]:
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , sep_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =encoder_hidden_size
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =projection_dim
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =hidden_act
lowerCamelCase_ =initializer_range
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =is_decoder
lowerCamelCase_ =use_cache
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> "PretrainedConfig":
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ , lowerCamelCase_ =cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
lowerCamelCase_ =config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = "blip_vision_model"
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=384 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1E-10 , **_SCREAMING_SNAKE_CASE , )-> str:
super().__init__(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =projection_dim
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =patch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =hidden_act
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> "PretrainedConfig":
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ , lowerCamelCase_ =cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
lowerCamelCase_ =config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:List[str] = "blip"
_UpperCamelCase:int = True
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2.6_5_9_2 , _SCREAMING_SNAKE_CASE=256 , **_SCREAMING_SNAKE_CASE , )-> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE )
if text_config is None:
lowerCamelCase_ ={}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase_ ={}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
lowerCamelCase_ =BlipTextConfig(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =BlipVisionConfig(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.vision_config.hidden_size
lowerCamelCase_ =projection_dim
lowerCamelCase_ =logit_scale_init_value
lowerCamelCase_ =1.0
lowerCamelCase_ =0.0_2
lowerCamelCase_ =image_text_hidden_size
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =copy.deepcopy(self.__dict__ )
lowerCamelCase_ =self.text_config.to_dict()
lowerCamelCase_ =self.vision_config.to_dict()
lowerCamelCase_ =self.__class__.model_type
return output
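# Minimal usage sketch (added; not part of the original module). It only exercises
# the composition logic of the classes defined above:
if __name__ == "__main__":
    text_cfg = BlipTextConfig(vocab_size=30524)
    vision_cfg = BlipVisionConfig(image_size=384)
    blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
    # the text encoder is told about the vision hidden size during composition
    assert blip_cfg.text_config.encoder_hidden_size == vision_cfg.hidden_size
    print(blip_cfg.to_dict()["model_type"])  # -> "blip"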
| 154 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
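# Hedged standalone sketch (added; not part of the test file): the integration test
# above corresponds to a plain forward pass, roughly:
# from transformers import FlaubertModel, FlaubertTokenizer
# tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
# model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
# inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
# last_hidden_state = model(**inputs)[0]  # shape: (1, seq_len, 768)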
| 154 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares masked indices for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the gumbel softmax temperature during self-supervised pretraining.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
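# Hedged invocation sketch (added; not part of the original script). The argument
# names mirror the dataclasses above; the script filename and the model/dataset
# identifiers are placeholders, not values taken from the source:
#
#   python run_wav2vec2_pretrain.py \
#       --model_name_or_path facebook/wav2vec2-large-lv60 \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --train_split_name train.100 \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train \
#       --max_duration_in_seconds 20.0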
| 357 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
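# Minimal usage sketch (added; not part of the original module). It assumes the
# surrounding transformers imports resolve and only exercises the attribute aliases:
if __name__ == "__main__":
    cfg = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
    # `hidden_size` and `num_attention_heads` are aliases for d_model / encoder heads
    print(cfg.hidden_size, cfg.num_attention_heads)  # -> 256 8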
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 96 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 96 | 1 |
def add(first: int, second: int) -> int:
    """
    Implementation of addition of two integers using bitwise operators.
    """
    while second != 0:
        # carry holds the common set bits of first and second
        carry = first & second
        # XOR adds the bits where at most one of them is set
        first ^= second
        # the carry is shifted left so it can be added in on the next pass
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 148 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
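# Hedged standalone sketch (added; not part of the test file): the fill-mask check
# above can be reproduced with the high-level pipeline API, e.g.
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="uw-madison/nystromformer-512")
# print(unmasker("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expected: "capital"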
| 148 | 1 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
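# Hedged usage sketch (added; not part of the original __init__): schedulers exposed
# here are typically swapped onto a diffusers pipeline, roughly as:
# from diffusers import DDIMScheduler
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)  # `pipe` assumed defined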
| 318 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda x: x.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                # make sure that special tensors are converted and internal keys are skipped
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
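# --- Added example (not part of the original module): a minimal sketch of how the
# --- callbacks above might be wired into a pl.Trainer. `model` is assumed to be a
# --- LightningModule whose hparams include `output_dir`; all names are illustrative.
def build_example_trainer(model, output_dir, metric="rouge2", patience=3):
    checkpoint_callback = get_checkpoint_callback(output_dir, metric)  # keeps top-3 checkpoints by val metric
    early_stopping_callback = get_early_stopping_callback(metric, patience)
    return pl.Trainer(
        max_epochs=3,
        callbacks=[Seq2SeqLoggingCallback(), checkpoint_callback, early_stopping_callback],
    )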
| 284 | 0 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # Plain recursion: counts ordered sequences of items from `array` summing to `target`.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # Top-down recursion memoized with an explicit dp_array.
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # Bottom-up tabulation over all sub-targets from 1 to target.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
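# --- Added example (not in the original file): the same recurrence memoized with
# --- functools.lru_cache instead of an explicit dp_array; a sketch for comparison.
from functools import lru_cache


def combination_sum_iv_cached(array: list[int], target: int) -> int:
    @lru_cache(maxsize=None)
    def count(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count(remaining - item) for item in array)

    return count(target)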
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = 3
lowerCAmelCase_ = 5
lowerCAmelCase_ = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 302 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
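# --- Added example (not part of the original script): the script resolves
# --- model_args.task_type via import_module('tasks'); this is a hypothetical sketch of
# --- the NER task such a tasks.py might define (the real utils_ner interface may differ).
class ExampleNER(TokenClassificationTask):
    def get_labels(self, path: Optional[str]) -> List[str]:
        if path:
            with open(path) as f:
                return f.read().splitlines()
        # Default CoNLL-2003 label set.
        return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]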
| 302 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 27 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    # Register a Formatter class under its canonical name and any aliases.
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    # Remember why a format type cannot be used so get_formatter can re-raise the error.
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    # Resolve an alias (e.g. "np") to its canonical format type (e.g. "numpy").
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    # Instantiate the Formatter registered for `format_type`, or raise a helpful error.
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'")
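# --- Added example (not in the original module): how the registry above resolves
# --- aliases; only functions and classes defined in this file are used.
def _example_formatter_lookup():
    numpy_formatter = get_formatter("np")  # alias "np" -> "numpy" -> NumpyFormatter()
    assert isinstance(numpy_formatter, NumpyFormatter)
    assert get_format_type_from_alias("pd") == "pandas"
    # For an unregistered name, get_formatter raises a ValueError listing valid types.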
| 228 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 355 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 184 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    # Quicksort with a random pivot; returns the number of comparisons performed.
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    # Lomuto-style partition around a randomly chosen pivot moved to a[end].
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
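# --- Added sanity check (not in the original file): the array should now be in
# --- non-decreasing order.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1)), "array is not sorted"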
| 333 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__ : List[str] =logging.get_logger(__name__)
A__ : Any ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Any ={
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs, ):
        # Swap in a serializable pre-tokenizer before saving, since the custom Jieba
        # pre-tokenizer cannot be written to tokenizer.json.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
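# --- Added usage sketch (not in the original module): loading a pretrained checkpoint
# --- requires the rjieba package for the custom pre-tokenizer; output is illustrative,
# --- not verified here.
def _example_tokenize():
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    encoding = tokenizer("今天天气非常好。")
    return tokenizer.decode(encoding.input_ids)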
| 70 | 0 |
"""simple docstring"""
__A = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 2 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory(args):
    # Factory used by the datasets CLI to instantiate the command from parsed args.
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 2 | 1 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ) -> None:
    # Depth-first backtracking: extend current_list until k numbers are chosen.
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
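    # --- Added cross-check (not in the original file): the recursive generator matches
    # --- itertools.combinations (k-subsets of 1..n in the same lexicographic order).
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]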
| 252 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # Mirrors the WinAPI CONSOLE_CURSOR_INFO struct.
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    # Hide the terminal cursor for the duration of the with-block, restoring it even
    # if the body raises. (The context manager's name is our choice in this reconstruction.)
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
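# --- Added usage example (not in the original module): hide the cursor while rendering
# --- a transient status line; `hide` is the context manager defined above.
def _example_status():
    import time

    with hide():
        for i in range(3):
            sys.stdout.write(f"\rworking... {i + 1}/3")
            sys.stdout.flush()
            time.sleep(0.5)
    print()  # the cursor is visible again here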
| 200 | 0 |
__lowerCamelCase : Optional[int] = """Tobias Carryer"""
from time import time
class A__ :
def __init__( self , A_ , A_ , A_ , A_=int(time() ) ): # noqa: B008
'''simple docstring'''
UpperCamelCase : int = multiplier
UpperCamelCase : Tuple = increment
UpperCamelCase : List[str] = modulo
UpperCamelCase : Tuple = seed
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__lowerCamelCase : List[str] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
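# --- Added example (not in the original file): the stream is fully determined by the
# --- seed, so two generators with identical parameters replay the same sequence.
def _example_determinism():
    a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    assert [a.next_number() for _ in range(5)] == [b.next_number() for _ in range(5)]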
| 140 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 140 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 28 | import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """simple docstring"""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        # `sequences` holds the character, BPE and wordpiece logits; keep the decode
        # with the highest confidence for each example.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 118 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def _snake_case ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : Tuple = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
snake_case_ : List[str] = tokenizer_s.tokenize(lowercase_ )
snake_case_ : Union[str, Any] = tokenizer_r.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case_ : str = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
snake_case_ : Optional[int] = tokenizer_s.tokenize(lowercase_ )
snake_case_ : Optional[int] = tokenizer_r.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Test that the tokenization is identical on unicode of space type
snake_case_ : Optional[int] = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case_ : int = tokenizer_s.tokenize(lowercase_ )
snake_case_ : Any = tokenizer_r.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Test that the tokenization is identical on unicode of line break type
snake_case_ : int = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case_ : List[Any] = tokenizer_s.tokenize(lowercase_ )
snake_case_ : int = tokenizer_r.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self : Union[str, Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
    def test_log_warning( self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
    def test_tokenization_python_rust_equals( self ):
super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case( self ):
# CLIP always lower cases letters
pass
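# Quick worked example of the offset arithmetic exercised by the `add_prefix_space`
# test above (a plain-Python sketch; no tokenizer needed, and the 5-character word
# is an assumption for illustration):
word = "hello"
text = f"{word} {word}"
first, second = (0, len(word)), (len(word) + 1, 2 * len(word) + 1)  # (0, 5) and (6, 11)
assert text[first[0] : first[1]] == word and text[second[0] : second[1]] == word
# Prefixing a single space shifts both spans right by one: (1, 6) and (7, 12).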
| 357 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3 ):
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            # controlled phase rotation between qubit j and the current target qubit
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
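    # Optional sanity check (assumption: the Aer qasm_simulator backend is installed).
    # The QFT of |000> is a uniform superposition, so each of the 2**3 = 8 outcomes
    # should receive roughly 10_000 / 8 = 1_250 of the shots.
    counts = quantum_fourier_transform(3)
    assert sum(counts.values()) == 10_000
    assert all(abs(shots - 1_250) < 250 for shots in counts.values())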
| 155 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments( path , n_shave_prefix_segments=1 ):
    '''simple docstring'''
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
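# Doctest-style illustration of the two renaming helpers above:
# >>> shave_segments("input_blocks.1.0.in_layers.0.weight", 2)
# '0.in_layers.0.weight'
# >>> renew_resnet_paths(["in_layers.0.weight"])
# [{'old': 'in_layers.0.weight', 'new': 'norm1.weight'}]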
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    '''simple docstring'''
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['''query''']] = query.reshape(target_shape )
            checkpoint[path_map['''key''']] = key.reshape(target_shape )
            checkpoint[path_map['''value''']] = value.reshape(target_shape )
for path in paths:
        new_path = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
        new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
        new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
        new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
                new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
else:
            checkpoint[new_path] = old_checkpoint[path['''old''']]
def convert_ldm_checkpoint( checkpoint , config ):
    '''simple docstring'''
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
    num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks )
    }
# Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks )
    }
# Retrieves the keys for the output blocks only
    num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'''old''': f'input_blocks.{i}.0', '''new''': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
'''old''': f'input_blocks.{i}.1',
'''new''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
            to_split = {
f'input_blocks.{i}.1.qkv.bias': {
'''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'''old''': f'output_blocks.{i}.0', '''new''': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
'''old''': f'output_blocks.{i}.1',
'''new''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
                to_split = {
f'output_blocks.{i}.1.qkv.bias': {
'''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
                new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 226 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config , args ):
    '''simple docstring'''
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
# Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
# Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
# Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
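# Worked example of the accumulation arithmetic above (the requested batch size of 64
# is an assumption for illustration, not a value taken from this script):
_EXAMPLE_BATCH_SIZE = 64
assert _EXAMPLE_BATCH_SIZE // MAX_GPU_BATCH_SIZE == 4  # 4 passes of 16 == one step over 64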
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 226 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model( model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
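# Minimal sketch of the `no_sync` semantics the tests below exercise (assumption:
# standard PyTorch DDP behaviour): inside the context manager gradients are only
# accumulated locally on each rank, and they are all-reduced by the first backward
# pass taken outside of it, e.g.
#   with accelerator.no_sync(ddp_model):
#       step_model(ddp_model, ddp_input, ddp_target, accelerator)  # local grads only
#   step_model(ddp_model, ddp_input, ddp_target, accelerator)      # grads synced here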
def test_noop_sync( accelerator ):
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
    model , ddp_model , dataloader = get_training_setup(accelerator )
# Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
else:
# Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
# Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
else:
# Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
'''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
# Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
'''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
        step_model(model , input , target , accelerator , False )
opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(accelerator )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(accelerator )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn( index ):
    '''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
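    # This script is intended to be launched across processes (an assumption based on
    # the DistributedType branches in `main` above), e.g. with `accelerate launch
    # <this_file>`, so that the MULTI_GPU/MULTI_CPU code paths actually run.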
| 159 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
'''simple docstring'''
return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
    def test_save_load_optional_components( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16( self ):
'''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_all( self ):
        '''simple docstring'''
        # if
        pipe_1 = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("""cuda""" )
        prompt_embeds , negative_prompt_embeds = pipe_1.encode_prompt("""anime turtle""" , device="""cuda""" )
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components )
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components )
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
    def _test_if( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , num_inference_steps=2 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_img2img( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , num_inference_steps=2 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_inpainting( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , num_inference_steps=2 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(image , expected_image )
def _start_torch_memory_measurement():
    '''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
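# Usage sketch for the peak-memory pattern used throughout the slow tests above
# (assumes a CUDA device):
#   _start_torch_memory_measurement()              # reset the peak counters
#   ...run one pipeline stage...
#   mem_bytes = torch.cuda.max_memory_allocated()  # peak attributable to that stage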
| 159 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ):
        '''simple docstring'''
        return f"""Node({self.data})"""
class LinkedList:
    def __init__( self ):
        '''simple docstring'''
        self.head = None
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        '''simple docstring'''
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        '''simple docstring'''
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ): # print every node data
        '''simple docstring'''
        print(self )
    def delete_head( self ):
        '''simple docstring'''
        return self.delete_nth(0 )
    def delete_tail( self ): # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1: # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        '''simple docstring'''
        return self.head is None
    def reverse( self ):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
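# Quick usage sketch of the class above (doctest-style):
# >>> ll = LinkedList()
# >>> ll.insert_tail(1); ll.insert_tail(2); ll.insert_head(0)
# >>> str(ll)
# '0->1->2'
# >>> ll.reverse(); str(ll)
# '2->1->0'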
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2():
    test_input = [
-9,
100,
Node(77_345_112 ),
'dlrow olleH',
7,
5_555,
0,
        -192.55555,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
        12.20,
]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
    result = linked_list.delete_head()
assert result == -9
assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
assert result is None
assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
        str(linked_list )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
    linked_list.insert_tail(None )
assert (
        str(linked_list )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
        str(linked_list )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main():
from doctest import testmod
testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
    print(linked_list )
print('\nReading/changing Node data using indexing:' )
print(F"""Element at Position 1: {linked_list[1]}""" )
    linked_list[1] = input('Enter New Value: ' ).strip()
print('New list:' )
    print(linked_list )
    print(F"""length of linked_list is : {len(linked_list )}""" )
if __name__ == "__main__":
main()
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 0 |
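The init file above defers the heavy torch imports by replacing the module object with a `_LazyModule` built from `_import_structure`. A generic, stdlib-only sketch of that pattern (an illustration, not the transformers helper): each submodule is imported only when one of its exported names is first accessed.

import importlib
import sys
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that provides it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(
            "." + self._name_to_module[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs once per name
        return value


# In a package __init__.py this would replace the module itself:
# sys.modules[__name__] = LazyModule(__name__, _import_structure)
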
'''simple docstring'''
import numpy as np
class Cell:
    """A grid cell: its position, parent link, and the A* g/h/f costs."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A world_size[0] x world_size[1] grid on which the search runs."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the up-to-eight in-bounds neighbours of `cell`."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """Expand the cheapest open cell until `goal` is reached; return the path."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip cells that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Skip cells already queued with an equal or better score.
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 366 |
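Two properties of the search above are worth flagging: the open list is scanned with np.argmin, which costs O(n) per expansion, and the squared-distance heuristic overestimates relative to the unit step cost, so the returned path is not guaranteed optimal. A sketch of the standard remedy, a heap-ordered open set with an admissible heuristic (a hypothetical helper kept separate from the classes above):

import heapq


def astar_heap(world, start, goal):
    """A* with a heap-ordered open set and lazy deletion of stale entries."""
    # Entries are (f, tie_breaker, cell); the counter keeps heap comparisons
    # away from Cell objects, which define __eq__ but not __lt__.
    counter = 0
    open_heap = [(0, counter, start)]
    expanded = set()
    while open_heap:
        _, _, current = heapq.heappop(open_heap)
        if current.position in expanded:
            continue  # stale duplicate left behind by lazy deletion
        expanded.add(current.position)
        if current == goal:
            path = []
            while current is not None:
                path.append(current.position)
                current = current.parent
            return path[::-1]
        for n in world.get_neighbours(current):
            if n.position in expanded:
                continue
            n.g = current.g + 1
            # Chebyshev distance is admissible for unit-cost 8-way moves.
            n.h = max(abs(goal.position[0] - n.position[0]),
                      abs(goal.position[1] - n.position[1]))
            counter += 1
            heapq.heappush(open_heap, (n.g + n.h, counter, n))
    return []
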
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
class snake_case__ ( nn.Module ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = (16, 32, 96, 256)
lowerCamelCase = jnp.floataa
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
snake_case : Optional[int] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case : Union[str, Any] = []
for i in range(len(self.block_out_channels ) - 1 ):
snake_case : Optional[Any] = self.block_out_channels[i]
snake_case : Optional[int] = self.block_out_channels[i + 1]
snake_case : Optional[int] = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
snake_case : Optional[int] = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
snake_case : Tuple = blocks
snake_case : Tuple = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Optional[int] , UpperCamelCase__ : Any ) -> Tuple:
"""simple docstring"""
snake_case : Dict = self.conv_in(UpperCamelCase__ )
snake_case : int = nn.silu(UpperCamelCase__ )
for block in self.blocks:
snake_case : str = block(UpperCamelCase__ )
snake_case : Optional[Any] = nn.silu(UpperCamelCase__ )
snake_case : Optional[Any] = self.conv_out(UpperCamelCase__ )
return embedding
@flax_register_to_config
class snake_case__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = 32
lowerCamelCase = 4
lowerCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase = False
lowerCamelCase = (320, 640, 1280, 1280)
lowerCamelCase = 2
lowerCamelCase = 8
lowerCamelCase = None
lowerCamelCase = 1280
lowerCamelCase = 0.0
lowerCamelCase = False
lowerCamelCase = jnp.floataa
lowerCamelCase = True
lowerCamelCase = 0
lowerCamelCase = "rgb"
lowerCamelCase = (16, 32, 96, 256)
def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : jax.random.KeyArray ) -> FrozenDict:
"""simple docstring"""
snake_case : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case : Any = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa )
snake_case : Dict = jnp.ones((1,) , dtype=jnp.intaa )
snake_case : List[str] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case : Optional[int] = (1, 3, self.sample_size * 8, self.sample_size * 8)
snake_case : int = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa )
snake_case ,snake_case : Optional[int] = jax.random.split(UpperCamelCase__ )
snake_case : Optional[int] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )["params"]
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case : Optional[int] = self.block_out_channels
snake_case : Optional[int] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
snake_case : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case : List[Any] = FlaxTimestepEmbedding(UpperCamelCase__ , dtype=self.dtype )
snake_case : int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case : Any = self.only_cross_attention
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : str = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case : str = []
snake_case : List[str] = []
snake_case : Union[str, Any] = block_out_channels[0]
snake_case : Tuple = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case : Dict = output_channel
snake_case : Union[str, Any] = block_out_channels[i]
snake_case : Tuple = i == len(UpperCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case : List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case : str = FlaxDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCamelCase__ )
for _ in range(self.layers_per_block ):
snake_case : Union[str, Any] = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
if not is_final_block:
snake_case : str = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
snake_case : List[Any] = down_blocks
snake_case : List[Any] = controlnet_down_blocks
# mid
snake_case : Optional[int] = block_out_channels[-1]
snake_case : Optional[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case : List[Any] = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
snake_case : Optional[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
snake_case : Dict = jnp.flip(UpperCamelCase__ , axis=1 )
# 1. time
if not isinstance(UpperCamelCase__ , jnp.ndarray ):
snake_case : str = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case : Any = timesteps.astype(dtype=jnp.floataa )
snake_case : Optional[Any] = jnp.expand_dims(UpperCamelCase__ , 0 )
snake_case : int = self.time_proj(UpperCamelCase__ )
snake_case : Tuple = self.time_embedding(UpperCamelCase__ )
# 2. pre-process
snake_case : Dict = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
snake_case : Optional[int] = self.conv_in(UpperCamelCase__ )
snake_case : str = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
snake_case : Optional[int] = self.controlnet_cond_embedding(UpperCamelCase__ )
sample += controlnet_cond
# 3. down
snake_case : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case ,snake_case : Dict = down_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
else:
snake_case ,snake_case : Dict = down_block(UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
snake_case : List[str] = self.mid_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
        # 5. controlnet blocks
snake_case : Tuple = ()
for down_block_res_sample, controlnet_block in zip(UpperCamelCase__ , self.controlnet_down_blocks ):
snake_case : Any = controlnet_block(UpperCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
snake_case : Optional[Any] = controlnet_down_block_res_samples
snake_case : int = self.controlnet_mid_block(UpperCamelCase__ )
# 6. scaling
snake_case : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCamelCase__ , mid_block_res_sample=UpperCamelCase__ )
| 83 | 0 |
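A design detail worth calling out in the module above: every residual leaving the controlnet (the per-down-block 1x1 convolutions and the mid-block projection) is built with zero-valued kernel and bias initializers. At initialization the control branch therefore contributes exactly nothing to the UNet it feeds, and training grows its influence from zero. A NumPy illustration of that property (purely illustrative; a 1x1 convolution over channels is just a matrix multiply):

import numpy as np

channels_in, channels_out = 8, 8
zero_kernel = np.zeros((channels_out, channels_in))
zero_bias = np.zeros(channels_out)

features = np.random.randn(16, channels_in)  # 16 spatial positions
control_residual = features @ zero_kernel.T + zero_bias

unet_residual = np.random.randn(16, channels_out)
# The sum is exactly the UNet residual until training moves the kernel away
# from zero, so bolting the controlnet on cannot disturb a pretrained UNet.
assert np.allclose(unet_residual + control_residual, unet_residual)
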
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
lowerCamelCase_ , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowerCamelCase__ ( lowerCamelCase_ ):
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.framework == "tf":
snake_case : int = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=SCREAMING_SNAKE_CASE )
else:
raise ValueError("Unsupported framework" )
return masked_index
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Dict = self.get_masked_index(SCREAMING_SNAKE_CASE )
snake_case : List[str] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if return_tensors is None:
snake_case : Union[str, Any] = self.framework
snake_case : Dict = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(SCREAMING_SNAKE_CASE )
return model_inputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Any = self.model(**SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = model_inputs["input_ids"]
return model_outputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case : str = target_ids.shape[0]
snake_case : Union[str, Any] = model_outputs["input_ids"][0]
snake_case : str = model_outputs["logits"]
if self.framework == "tf":
snake_case : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case : Any = outputs.numpy()
snake_case : Any = outputs[0, masked_index, :]
snake_case : List[str] = stable_softmax(SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
snake_case : str = tf.gather_nd(tf.squeeze(SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case : Dict = tf.expand_dims(SCREAMING_SNAKE_CASE , 0 )
snake_case : str = tf.math.top_k(SCREAMING_SNAKE_CASE , k=SCREAMING_SNAKE_CASE )
snake_case , snake_case : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
snake_case : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case : List[Any] = outputs[0, masked_index, :]
snake_case : str = logits.softmax(dim=-1 )
if target_ids is not None:
snake_case : Optional[int] = probs[..., target_ids]
snake_case , snake_case : Any = probs.topk(SCREAMING_SNAKE_CASE )
snake_case : Any = []
snake_case : Tuple = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case : int = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case : Tuple = input_ids.numpy().copy()
if target_ids is not None:
snake_case : Union[str, Any] = target_ids[p].tolist()
snake_case : Any = p
# Filter padding out:
snake_case : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case : Union[str, Any] = self.tokenizer.decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
snake_case : Tuple = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(SCREAMING_SNAKE_CASE )
result.append(SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : List[str] = [targets]
try:
snake_case : Any = self.tokenizer.get_vocab()
except Exception:
snake_case : Any = {}
snake_case : List[Any] = []
for target in targets:
snake_case : List[Any] = vocab.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if id_ is None:
snake_case : Any = self.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , max_length=1 , truncation=SCREAMING_SNAKE_CASE , )["input_ids"]
if len(SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
"We cannot replace it with anything meaningful, ignoring it" )
continue
snake_case : str = input_ids[0]
                # XXX: if users hit this path, tokenization becomes pretty slow,
                # so this warning nudges them to fix their input and get
                # faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
snake_case : Union[str, Any] = list(set(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("At least one target must be provided when passed." )
snake_case : Optional[Any] = np.array(SCREAMING_SNAKE_CASE )
return target_ids
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
snake_case : int = {}
if targets is not None:
snake_case : List[str] = self.get_target_ids(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : str = target_ids
if top_k is not None:
snake_case : int = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : List[str] = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
| 148 |
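For orientation, this is how the pipeline above is typically driven through transformers.pipeline; the model name is only an example, and target strings may need the tokenizer's own surface form (here, RoBERTa-style leading spaces) to resolve to a single vocabulary token:

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")

# top_k and targets correspond to the postprocess parameters handled above.
predictions = fill_mask(
    "The capital of France is <mask>.",
    top_k=3,
    targets=[" Paris", " London"],
)
for prediction in predictions:
    print(prediction["token_str"], round(prediction["score"], 4))
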
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 148 | 1 |
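Both functions above must keep those exact names because they are pytest hooks. Outside this repository, the same wiring minus the shared transformers helpers would look roughly like this (a generic sketch):

# conftest.py (generic sketch, not the transformers helpers)
def pytest_addoption(parser):
    parser.addoption(
        "--make-reports",
        action="store",
        default=None,
        help="generate report files prefixed with this id",
    )


def pytest_terminal_summary(terminalreporter):
    report_id = terminalreporter.config.getoption("--make-reports")
    if report_id:
        terminalreporter.write_sep("-", f"writing reports for run {report_id}")
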
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = "ZinengTang/tvlt-base"
_UpperCAmelCase : int = tempfile.mkdtemp()
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **A_ )
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : str = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , A_ )
self.assertIsInstance(processor.image_processor , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : List[str] = np.ones([12000] )
_UpperCAmelCase : int = feature_extractor(A_ , return_tensors="np" )
_UpperCAmelCase : int = processor(audio=A_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : Union[str, Any] = np.ones([3, 224, 224] )
_UpperCAmelCase : Tuple = image_processor(A_ , return_tensors="np" )
_UpperCAmelCase : List[str] = processor(images=A_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_feature_extractor()
_UpperCAmelCase : Dict = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : str = np.ones([12000] )
_UpperCAmelCase : Optional[Any] = np.ones([3, 224, 224] )
_UpperCAmelCase : List[Any] = processor(audio=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : str = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 189 |
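The processor under test merges the outputs of an image processor and an audio feature extractor into one dict, raising when neither input is given. The dispatch pattern in isolation (a generic sketch, not the transformers class):

class MultimodalProcessor:
    def __init__(self, image_processor, feature_extractor):
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either images or audio.")
        outputs = {}
        if audio is not None:
            outputs.update(self.feature_extractor(audio, **kwargs))
        if images is not None:
            outputs.update(self.image_processor(images, **kwargs))
        return outputs
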
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # Prepend: the new node becomes the head.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Exchange the payloads rather than re-linking the nodes.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
| 189 | 1 |
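swap_nodes above exchanges payloads, which is simple and O(1) once both nodes are found, but it invalidates any external references that identify a node with its data. The alternative is to re-link the nodes themselves; a sketch of that variant on the same Node type (a hypothetical helper, covering the adjacent-node case):

def swap_nodes_by_relinking(head, value_a, value_b):
    if value_a == value_b:
        return head
    # Find each target node and its predecessor.
    prev_1, node_1 = None, head
    while node_1 is not None and node_1.data != value_a:
        prev_1, node_1 = node_1, node_1.next
    prev_2, node_2 = None, head
    while node_2 is not None and node_2.data != value_b:
        prev_2, node_2 = node_2, node_2.next
    if node_1 is None or node_2 is None:
        return head
    # Point each predecessor (or the head) at the opposite node. When the
    # nodes are adjacent this briefly creates a self-link, which the final
    # tuple swap resolves correctly.
    if prev_1 is None:
        head = node_2
    else:
        prev_1.next = node_2
    if prev_2 is None:
        head = node_1
    else:
        prev_2.next = node_1
    node_1.next, node_2.next = node_2.next, node_1.next
    return head
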
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[list[float]] ):
"""simple docstring"""
__a = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_SCREAMING_SNAKE_CASE ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__a = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
__a = [[0.0, 0.0], [0.0, 0.0]]
__a , __a = matrix[1][1], matrix[0][0]
__a , __a = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_SCREAMING_SNAKE_CASE ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_SCREAMING_SNAKE_CASE ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__a = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
__a = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__a = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__a = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__a = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__a = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__a = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__a = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__a = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__a = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__a = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__a = array(_SCREAMING_SNAKE_CASE )
for i in range(3 ):
for j in range(3 ):
__a = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__a = array(_SCREAMING_SNAKE_CASE )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_SCREAMING_SNAKE_CASE )
# Calculate the inverse of the matrix
return [[float(d(_SCREAMING_SNAKE_CASE ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 302 |
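The 2x2 branch is the adjugate formula: swap the diagonal, negate the off-diagonal, divide by the determinant ad - bc. A compact sketch with a NumPy cross-check (a hypothetical helper written for this note):

import numpy as np


def inverse_2x2(m):
    (a, b), (c, d) = m
    det = a * d - b * c
    if det == 0:
        raise ValueError("This matrix has no inverse.")
    # adj([[a, b], [c, d]]) = [[d, -b], [-c, a]]
    return [[d / det, -b / det], [-c / det, a / det]]


m = [[4.0, 7.0], [2.0, 6.0]]
assert np.allclose(inverse_2x2(m), np.linalg.inv(m))
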
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = tokenizer("""This is me""" , return_tensors="""pt""" )
__a = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**__lowercase )
__a = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowercase ):
model.save_pretrained(__lowercase )
__a = model.reverse_bettertransformer()
model.save_pretrained(__lowercase )
| 302 | 1 |
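Outside the test, the round trip being checked is the recommended usage pattern: convert to BetterTransformer for inference, convert back before serializing, since the second test asserts that saving a converted model raises. A sketch using the same tiny test model (requires optimum installed; the class names below are the unobfuscated transformers ones):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

name = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSeq2SeqLM.from_pretrained(name)

model = model.to_bettertransformer()  # fused attention kernels for inference
inputs = tokenizer("This is me", return_tensors="pt")
output = model.generate(**inputs)

model = model.reverse_bettertransformer()  # restore the savable module graph
model.save_pretrained("./t5-roundtrip")  # example path
print(tokenizer.decode(output[0], skip_special_tokens=True))
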
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = UniSpeechSatForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ )
lowerCamelCase_ = downstream_dict["""projector.weight"""]
lowerCamelCase_ = downstream_dict["""projector.bias"""]
lowerCamelCase_ = downstream_dict["""model.post_net.linear.weight"""]
lowerCamelCase_ = downstream_dict["""model.post_net.linear.bias"""]
return model
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
lowerCamelCase_ = downstream_dict["""model.linear.weight"""]
lowerCamelCase_ = downstream_dict["""model.linear.bias"""]
return model
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = UniSpeechSatForXVector.from_pretrained(snake_case_ , config=snake_case_ )
lowerCamelCase_ = downstream_dict["""connector.weight"""]
lowerCamelCase_ = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowerCamelCase_ = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
lowerCamelCase_ = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
lowerCamelCase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
lowerCamelCase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
lowerCamelCase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
lowerCamelCase_ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
lowerCamelCase_ = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = torch.load(snake_case_ , map_location="cpu" )
lowerCamelCase_ = checkpoint["""Downstream"""]
lowerCamelCase_ = UniSpeechSatConfig.from_pretrained(snake_case_ )
lowerCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
lowerCamelCase_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
lowerCamelCase_ = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith("ForAudioFrameClassification" ):
lowerCamelCase_ = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith("ForXVector" ):
lowerCamelCase_ = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
lowerCamelCase_ = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__A =parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 355 |
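Each converter above moves raw tensors from the S3PRL checkpoint into named parameters of the Hugging Face model. The core move, isolated on a single linear layer (the dict keys are stand-ins for the checkpoint keys used above):

import torch
import torch.nn as nn

# Stand-in for checkpoint["Downstream"]: a flat dict of tensors.
downstream = {
    "projector.weight": torch.randn(2, 4),
    "projector.bias": torch.randn(2),
}

projector = nn.Linear(4, 2)
with torch.no_grad():
    # copy_ writes in place, keeping the Parameter object (and anything
    # holding a reference to it) intact.
    projector.weight.copy_(downstream["projector.weight"])
    projector.bias.copy_(downstream["projector.bias"])

assert torch.equal(projector.weight, downstream["projector.weight"])
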
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, input the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 47 | 0 |
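The two loops above implement the usual sliding-window split: each sample is look_back consecutive points and each target the following forward_days points. A reusable sketch of the same windowing with shape checks (a hypothetical helper):

import numpy as np


def make_windows(series, look_back, forward_days):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])  # model input window
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)


x, y = make_windows(np.arange(10.0), look_back=3, forward_days=2)
assert x.shape == (6, 3) and y.shape == (6, 2)
assert list(x[0]) == [0.0, 1.0, 2.0] and list(y[0]) == [3.0, 4.0]
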
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : str = logging.get_logger(__name__)
class _a ( lowercase__ , lowercase__):
"""simple docstring"""
UpperCamelCase__ = """maskformer-swin"""
UpperCamelCase__ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[Any]=2_2_4 , __UpperCamelCase : List[Any]=4 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Dict=9_6 , __UpperCamelCase : List[Any]=[2, 2, 6, 2] , __UpperCamelCase : Optional[int]=[3, 6, 1_2, 2_4] , __UpperCamelCase : int=7 , __UpperCamelCase : Optional[Any]=4.0 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : int=0.0 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=False , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : List[Any]=1e-5 , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , **__UpperCamelCase : Dict , )->int:
super().__init__(**__lowerCamelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(__lowerCamelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
_UpperCAmelCase = ["stem"] + [F'stage{idx}' for idx in range(1 , len(__lowerCamelCase ) + 1 )]
_UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 260 |
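The attribute_map in the config above lets calling code ask for num_attention_heads while the stored field is num_heads. The mechanism is an attribute-lookup fallback; a minimal generic sketch of the idea (not the PretrainedConfig implementation, which, as far as I recall, also redirects attribute writes):

class MappedConfig:
    attribute_map = {"num_attention_heads": "num_heads"}

    def __init__(self, num_heads=(3, 6, 12, 24)):
        self.num_heads = num_heads

    def __getattr__(self, name):
        # Only reached when normal lookup fails; redirect mapped aliases.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


cfg = MappedConfig()
assert cfg.num_attention_heads == cfg.num_heads
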
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Branch on excluding, then including, sequence[index]."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 184 | 0 |
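The recursion above walks a binary include/exclude decision tree, so it prints all 2^n subsequences. An equivalent iterative enumeration uses bitmasks, with bit j of the mask selecting sequence[j] (a sketch):

def all_subsequences(sequence):
    n = len(sequence)
    result = []
    for mask in range(1 << n):
        result.append([sequence[j] for j in range(n) if mask >> j & 1])
    return result


subs = all_subsequences([3, 1, 2, 4])
assert len(subs) == 2 ** 4
assert [] in subs and [3, 1, 2, 4] in subs
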
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : Optional[int] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = DebertaVaTokenizer
_A = DebertaVaTokenizerFast
_A = True
_A = True
def _lowerCamelCase ( self :int ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase : Any = DebertaVaTokenizer(a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self :Optional[int] , a :List[str] ) -> List[str]:
__UpperCamelCase : Any = "this is a test"
__UpperCamelCase : Optional[int] = "this is a test"
return input_text, output_text
def _lowerCamelCase ( self :str ) -> Any:
__UpperCamelCase : Optional[Any] = "<pad>"
__UpperCamelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
__UpperCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(a ) , 3_0_0_0_1 )
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def _lowerCamelCase ( self :List[Any] ) -> str:
# fmt: off
__UpperCamelCase : int = " \tHeLLo!how \n Are yoU? "
__UpperCamelCase : Optional[int] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__UpperCamelCase : Dict = DebertaVaTokenizer(a , do_lower_case=a )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = DebertaVaTokenizerFast(a , do_lower_case=a )
__UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowerCamelCase ( self :str ) -> Any:
pass
def _lowerCamelCase ( self :Tuple ) -> Dict:
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : Dict = DebertaVaTokenizer(a , split_by_punct=a )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(a , split_by_punct=a )
__UpperCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[Any] ) -> str:
# fmt: off
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : Any = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Dict ) -> Any:
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : Tuple = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCamelCase : Optional[int] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[str] ) -> Tuple:
# fmt: off
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : List[str] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[str] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
# fmt: off
__UpperCamelCase : Optional[int] = " \tHeLLo!how \n Are yoU? "
__UpperCamelCase : str = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__UpperCamelCase : int = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Tuple = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :int ) -> Any:
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : List[Any] = self.get_rust_tokenizer()
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
__UpperCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : str = tokenizer.encode(a , add_special_tokens=a )
__UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
__UpperCamelCase : List[Any] = tokenizer.encode(a )
__UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
__UpperCamelCase : Optional[int] = "This is a test"
__UpperCamelCase : List[Any] = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__UpperCamelCase : Tuple = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__UpperCamelCase : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__UpperCamelCase : Union[str, Any] = DebertaVaTokenizer(a , keep_accents=a )
__UpperCamelCase : int = DebertaVaTokenizerFast(a , keep_accents=a )
__UpperCamelCase : Tuple = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : List[str] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : int = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__UpperCamelCase : Optional[int] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__UpperCamelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCamelCase : List[str] = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : int = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> str:
__UpperCamelCase : List[Any] = DebertaVaTokenizer(a )
__UpperCamelCase : Optional[int] = tokenizer.encode("sequence builders" )
__UpperCamelCase : Optional[int] = tokenizer.encode("multi-sequence build" )
__UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a )
__UpperCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a , a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , )
@slow
def _lowerCamelCase ( self :Dict ) -> int:
# fmt: off
__UpperCamelCase : Dict = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
 | 352 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Tuple = '▁'
lowercase : str = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowercase : Optional[int] = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
lowercase : List[Any] = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
lowercase : Optional[int] = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = ['input_ids', 'attention_mask']
_A = []
_A = []
def __init__( self :Tuple , a :List[str] , a :int , a :Dict=None , a :List[Any]=None , a :List[str]="<s>" , a :str="</s>" , a :Dict="</s>" , a :Optional[Any]="<pad>" , a :Union[str, Any]="<unk>" , a :List[Any]="m2m100" , a :Optional[Dict[str, Any]] = None , a :List[str]=8 , **a :Tuple , ) -> None:
__UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase : List[str] = language_codes
__UpperCamelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(a )
for lang_code in fairseq_language_code
if self.get_lang_token(a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a , tgt_lang=a , bos_token=a , eos_token=a , sep_token=a , unk_token=a , pad_token=a , language_codes=a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a , **a , )
__UpperCamelCase : Optional[Any] = vocab_file
__UpperCamelCase : List[str] = load_json(a )
__UpperCamelCase : Dict = {v: k for k, v in self.encoder.items()}
__UpperCamelCase : int = spm_file
__UpperCamelCase : List[Any] = load_spm(a , self.sp_model_kwargs )
__UpperCamelCase : int = len(self.encoder )
__UpperCamelCase : Tuple = {
self.get_lang_token(a ): self.encoder_size + i for i, lang_code in enumerate(a )
}
__UpperCamelCase : int = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a )}
__UpperCamelCase : Dict = {v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase : int = src_lang if src_lang is not None else "en"
__UpperCamelCase : int = tgt_lang
__UpperCamelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase : Union[str, Any] = num_madeup_words
@property
def _lowerCamelCase ( self :int ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _lowerCamelCase ( self :List[str] ) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self :Any , a :str ) -> None:
__UpperCamelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self :int , a :str ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _lowerCamelCase ( self :List[str] , a :str ) -> str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a , self.encoder[self.unk_token] )
def _lowerCamelCase ( self :List[Any] , a :int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a , self.unk_token )
def _lowerCamelCase ( self :List[str] , a :Optional[Any] ) -> Tuple:
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a ) + token
__UpperCamelCase : List[Any] = []
else:
current_sub_tokens.append(a )
out_string += self.sp_model.decode(a )
return out_string.strip()
def _lowerCamelCase ( self :Optional[int] , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
__UpperCamelCase : Optional[Any] = [1] * len(self.prefix_tokens )
__UpperCamelCase : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def _lowerCamelCase ( self :List[Any] , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self :Dict ) -> Dict:
__UpperCamelCase : int = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :str ) -> Dict:
__UpperCamelCase : Union[str, Any] = self.__dict__.copy()
__UpperCamelCase : int = None
return state
def __setstate__( self :List[Any] , a :Dict ) -> None:
__UpperCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Optional[Any] = {}
__UpperCamelCase : Optional[Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _lowerCamelCase ( self :List[Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
__UpperCamelCase : str = Path(a )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
__UpperCamelCase : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a )
if os.path.abspath(self.spm_file ) != os.path.abspath(a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a )
elif not os.path.isfile(self.spm_file ):
with open(a , "wb" ) as fi:
__UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a )
return (str(a ), str(a ))
def _lowerCamelCase ( self :Dict , a :List[str] , a :str = "en" , a :Optional[List[str]] = None , a :str = "ro" , **a :Union[str, Any] , ) -> BatchEncoding:
__UpperCamelCase : List[str] = src_lang
__UpperCamelCase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(a , a , **a )
def _lowerCamelCase ( self :Union[str, Any] , a :int , a :Optional[str] , a :Optional[str] , **a :List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__UpperCamelCase : int = src_lang
__UpperCamelCase : Tuple = self(a , add_special_tokens=a , **a )
__UpperCamelCase : Optional[int] = self.get_lang_id(a )
__UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowerCamelCase ( self :Any ) -> str:
self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self :Optional[int] ) -> Any:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self :Union[str, Any] , a :str ) -> None:
__UpperCamelCase : str = self.get_lang_token(a )
__UpperCamelCase : Union[str, Any] = self.lang_token_to_id[lang_token]
__UpperCamelCase : Optional[int] = [self.cur_lang_id]
__UpperCamelCase : str = [self.eos_token_id]
def _lowerCamelCase ( self :int , a :str ) -> None:
__UpperCamelCase : Any = self.get_lang_token(a )
__UpperCamelCase : Dict = self.lang_token_to_id[lang_token]
__UpperCamelCase : List[Any] = [self.cur_lang_id]
__UpperCamelCase : Tuple = [self.eos_token_id]
def _lowerCamelCase ( self :Optional[Any] , a :str ) -> str:
return self.lang_code_to_token[lang]
def _lowerCamelCase ( self :Optional[Any] , a :str ) -> int:
__UpperCamelCase : Dict = self.get_lang_token(a )
return self.lang_token_to_id[lang_token]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
__UpperCamelCase : str = sentencepiece.SentencePieceProcessor(**_lowerCamelCase)
spm.Load(str(_lowerCamelCase))
return spm
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> Union[Dict, List]:
'''simple docstring'''
with open(_lowerCamelCase , "r") as f:
return json.load(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : str) -> None:
'''simple docstring'''
with open(_lowerCamelCase , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2) | 151 | 0 |
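A minimal usage sketch for the tokenizer above, assuming the upstream class name M2M100Tokenizer and network access to the facebook/m2m100_418M checkpoint listed in the vocab map:
from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
tokenizer.src_lang = "en"  # the src_lang setter re-runs set_src_lang_special_tokens
batch = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
forced_bos = tokenizer.get_lang_id("fr")  # id of "__fr__", passed to generate() as forced_bos_token_id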
'''simple docstring'''
lowerCamelCase : List[Any] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 2 |
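The mapping above pins each dependency by bare package name. A hypothetical helper (the name deps_list and the binding deps are assumptions, not part of the file) shows the usual way such a table feeds setup metadata:
def deps_list(deps, *pkgs):
    # resolve bare package names to their pinned requirement strings
    return [deps[p] for p in pkgs]

# deps_list(deps, "torch", "transformers") -> ["torch>=1.4", "transformers>=4.25.1"]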
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = decoder_seq_length
# For common tests
lowercase__ = self.decoder_seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_layers
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = eos_token_id
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = decoder_start_token_id
lowercase__ = use_cache
lowercase__ = max_position_embeddings
lowercase__ = None
lowercase__ = decoder_seq_length
lowercase__ = 2
lowercase__ = 1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval()
lowercase__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowercase__ = outputs['''past_key_values''']
# create a hypothetical next token and extend next_input_ids with it
lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the new token to input_ids
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = model(UpperCamelCase )['''last_hidden_state''']
lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state''']
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = False
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
| 2 | 1 |
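The decoder test above verifies that one cached step reproduces the final position of a full forward pass. A condensed sketch of that invariant (model, input_ids, and next_tokens are assumptions standing in for the test fixtures):
import torch

past = model(input_ids, use_cache=True).past_key_values
full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
step = model(next_tokens, past_key_values=past).last_hidden_state
# the single cached step must match the last slice of the uncached pass
assert torch.allclose(full[:, -1:], step, atol=1e-3)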
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
if kwargs.pop("""add_bos_token""" , _lowerCAmelCase ):
A = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
| 337 |
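The decode override above accepts truncate_before_pattern, which cuts generated text at the first regex match. A short sketch, assuming the Salesforce/codegen-350M-mono checkpoint referenced in the file and a generated_ids sequence produced elsewhere:
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
# stop at a fresh top-level comment, a docstring, or a run of blank lines
text = tok.decode(generated_ids, truncate_before_pattern=["\n\n^#", "^'''", "\n\n\n"])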
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
# TODO Update this
_UpperCAmelCase = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''esm'''
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase=0.1 , lowercase=0.1 , lowercase=1_0_2_6 , lowercase=0.02 , lowercase=1E-12 , lowercase="absolute" , lowercase=True , lowercase=None , lowercase=False , lowercase=False , lowercase=None , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
A_ : int = vocab_size
A_ : Optional[int] = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Dict = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : Any = initializer_range
A_ : Tuple = layer_norm_eps
A_ : List[str] = position_embedding_type
A_ : Optional[int] = use_cache
A_ : Any = emb_layer_norm_before
A_ : Tuple = token_dropout
A_ : List[str] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
A_ : List[str] = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
A_ : Optional[Any] = EsmFoldConfig(**lowercase )
A_ : List[str] = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
A_ : Optional[Any] = get_default_vocab_list()
else:
A_ : int = vocab_list
else:
A_ : Union[str, Any] = None
A_ : Optional[int] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , lowercase ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
A_ : Tuple = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 1_2_8
lowerCamelCase_ = None
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.trunk is None:
A_ : List[str] = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
A_ : List[str] = TrunkConfig(**self.trunk )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = asdict(self )
A_ : Any = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = 4_8
lowerCamelCase_ = 1_0_2_4
lowerCamelCase_ = 1_2_8
lowerCamelCase_ = 3_2
lowerCamelCase_ = 3_2
lowerCamelCase_ = 3_2
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = False
lowerCamelCase_ = 4
lowerCamelCase_ = 1_2_8
lowerCamelCase_ = None
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.structure_module is None:
A_ : List[Any] = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
A_ : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
A_ : Optional[int] = self.sequence_state_dim // self.sequence_head_width
A_ : Optional[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = asdict(self )
A_ : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_2_8
lowerCamelCase_ = 1_6
lowerCamelCase_ = 1_2_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 0.1
lowerCamelCase_ = 8
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 7
lowerCamelCase_ = 1_0
lowerCamelCase_ = 1E-8
lowerCamelCase_ = 1E5
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return asdict(self )
def UpperCamelCase ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
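A brief sketch of how the nested configs above compose, assuming the upstream names EsmConfig/TrunkConfig and that the first TrunkConfig field is named num_blocks (both assumptions; the values are illustrative):
config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
as_dict = config.to_dict()  # the nested fold/trunk/structure-module configs serialize back to plain dicts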
| 140 |
import logging
from transformers import PretrainedConfig
_UpperCAmelCase = logging.getLogger(__name__)
_UpperCAmelCase = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''bertabs'''
def __init__( self , lowercase=3_0_5_2_2 , lowercase=5_1_2 , lowercase=6 , lowercase=5_1_2 , lowercase=8 , lowercase=5_1_2 , lowercase=0.2 , lowercase=6 , lowercase=7_6_8 , lowercase=8 , lowercase=2_0_4_8 , lowercase=0.2 , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = vocab_size
A_ : Union[str, Any] = max_pos
A_ : List[str] = enc_layers
A_ : Tuple = enc_hidden_size
A_ : List[Any] = enc_heads
A_ : str = enc_ff_size
A_ : Optional[Any] = enc_dropout
A_ : Dict = dec_layers
A_ : Optional[Any] = dec_hidden_size
A_ : int = dec_heads
A_ : Any = dec_ff_size
A_ : List[str] = dec_dropout
| 140 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
A_ : Tuple = logging.get_logger(__name__)
A_ : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : Optional[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
A_ : str = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
A_ : Optional[int] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Dict = VOCAB_FILES_NAMES
UpperCAmelCase__: str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: int = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: Union[str, Any] = SqueezeBertTokenizer
def __init__( self , A__=None , A__=None , A__=True , A__="[UNK]" , A__="[SEP]" , A__="[PAD]" , A__="[CLS]" , A__="[MASK]" , A__=True , A__=None , **A__ , ):
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
A__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , A__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , A__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , A__ ) != tokenize_chinese_chars
):
A__ : Tuple = getattr(A__ , normalizer_state.pop("""type""" ) )
A__ : Dict = do_lower_case
A__ : Tuple = strip_accents
A__ : int = tokenize_chinese_chars
A__ : Optional[int] = normalizer_class(**A__ )
A__ : Dict = do_lower_case
def __A ( self , A__ , A__=None ):
A__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , A__ , A__ = None ):
A__ : Union[str, Any] = [self.sep_token_id]
A__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A__ , A__ = None ):
A__ : List[str] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
| 141 |
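A worked example of the segment-id rule implemented above (toy ids, pure Python):
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8], [9]
token_type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
# -> [0, 0, 0, 0, 1, 1]: the first segment (with [CLS]/[SEP]) is 0, the second is 1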
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Optional[int] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141 | 1 |
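The _LazyModule registration above defers the heavy imports until an attribute is first touched; a minimal demonstration:
import transformers.models.fnet as fnet  # cheap: only the import structure is registered

config_cls = fnet.FNetConfig  # first attribute access triggers the real submodule import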
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : str = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : Union[str, Any] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = RobertaTokenizer
def __init__(self : str , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[int]="replace" , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : Optional[Any]="</s>" , _lowerCAmelCase : Dict="<s>" , _lowerCAmelCase : Optional[int]="<unk>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : Tuple="<mask>" , _lowerCAmelCase : Dict=False , _lowerCAmelCase : str=True , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase , **_lowerCAmelCase , )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
A = """post_processor"""
A = getattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
if tokenizer_component_instance:
A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A = tuple(state["""sep"""] )
if "cls" in state:
A = tuple(state["""cls"""] )
A = False
if state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = add_prefix_space
A = True
if state.get("""trim_offsets""" , _lowerCAmelCase ) != trim_offsets:
A = trim_offsets
A = True
if changes_to_apply:
A = getattr(_lowerCAmelCase , state.pop("""type""" ) )
A = component_class(**_lowerCAmelCase )
setattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Dict ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def A (self : Any , _lowerCAmelCase : Union[str, Any] ):
A = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else value
A = value
def A (self : List[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : List[str] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Tuple , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A (self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 258 |
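A short usage sketch for the mask-token handling above, assuming the stock roberta-base checkpoint: lstrip=True on the mask token lets the space before "<mask>" be absorbed into the mask piece.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tok("Paris is the <mask> of France.")  # the space before <mask> is folded into the mask token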
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
a = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def lowercase (snake_case__ : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase = torch.load(snake_case__ , map_location="""cpu""" )
return sd
def lowercase (snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=rename_keys_prefix ) -> Dict:
'''simple docstring'''
lowerCAmelCase = OrderedDict()
lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCAmelCase = key
for name_pair in rename_keys_prefix:
lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
lowerCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it was added separately, so it is populated from `cls.predictions.bias` here
lowerCAmelCase = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def lowercase (snake_case__ : List[Any] , snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowerCAmelCase = """pretraining"""
if "vcr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 512}
lowerCAmelCase = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
lowerCAmelCase = """vqa_advanced"""
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
lowerCAmelCase = """vqa"""
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
lowerCAmelCase = """nlvr"""
lowerCAmelCase = VisualBertConfig(**snake_case__ )
# Load State Dict
lowerCAmelCase = load_state_dict(snake_case__ )
lowerCAmelCase = get_new_dict(snake_case__ , snake_case__ )
if model_type == "pretraining":
lowerCAmelCase = VisualBertForPreTraining(snake_case__ )
elif model_type == "vqa":
lowerCAmelCase = VisualBertForQuestionAnswering(snake_case__ )
elif model_type == "nlvr":
lowerCAmelCase = VisualBertForVisualReasoning(snake_case__ )
elif model_type == "multichoice":
lowerCAmelCase = VisualBertForMultipleChoice(snake_case__ )
model.load_state_dict(snake_case__ )
# Save Checkpoints
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 155 | 0 |
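Example invocation of the conversion script above (the script filename and output path are placeholders; the checkpoint name must be one of ACCEPTABLE_CHECKPOINTS so the config branch can be inferred):
# python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visual_bert_vqa
# "vqa" in the checkpoint path selects visual_embedding_dim=2048, num_labels=3129
# and the VisualBertForQuestionAnswering head.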
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]="" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="train" ) -> Tuple:
assert os.path.isdir(SCREAMING_SNAKE_CASE__ )
a_ : int = []
a_ : Optional[int] = os.listdir(SCREAMING_SNAKE_CASE__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
a_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE__ )
def __len__( self : Dict ) -> str:
return len(self.documents )
def __getitem__( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> str:
a_ : int = self.documents[idx]
a_ : Tuple = document_path.split('/' )[-1]
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as source:
a_ : Dict = source.read()
a_ , a_ : Optional[Any] = process_story(SCREAMING_SNAKE_CASE__ )
return document_name, story_lines, summary_lines
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> Any:
"""simple docstring"""
a_ : Optional[Any] = list(filter(lambda __A : len(__A ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
a_ : List[Any] = [_add_missing_period(__A ) for line in nonempty_lines]
# gather article lines
a_ : int = []
a_ : List[Any] = deque(__A )
while True:
try:
a_ : Dict = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(__A )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
a_ : List[str] = list(filter(lambda __A : not t.startswith('@highlight' ) , __A ) )
return story_lines, summary_lines
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> Any:
"""simple docstring"""
a_ : Any = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']  # sentence enders, incl. single and double close quotes
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Union[str, Any] , __A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if len(__A ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__A )) )
return sequence
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : str ) -> Any:
"""simple docstring"""
a_ : Optional[int] = torch.ones_like(__A )
a_ : List[str] = sequence == pad_token_id
mask[idx_pad_tokens] = 0  # zero the mask wherever the sequence is padding
return mask
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Optional[Any] , __A : Dict ) -> List[str]:
"""simple docstring"""
a_ : Optional[int] = [tokenizer.encode(__A ) for line in story_lines]
a_ : int = [token for sentence in story_lines_token_ids for token in sentence]
a_ : Dict = [tokenizer.encode(__A ) for line in summary_lines]
a_ : int = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[str] ) -> Optional[Any]:
"""simple docstring"""
a_ : int = []
for sequence in batch:
a_ : int = -1
a_ : Dict = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__A )
return torch.tensor(__A )
| 120 |
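A sketch tying the helpers above together for one example, assuming their upstream names (encode_for_summarization, fit_to_block_size, build_mask) and a dataset/tokenizer pair constructed elsewhere:
import torch

document_name, story_lines, summary_lines = dataset[0]
story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
story_ids = fit_to_block_size(story_ids, 512, tokenizer.pad_token_id)
attention_mask = build_mask(torch.tensor([story_ids]), tokenizer.pad_token_id)  # zeros over padding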
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
a_ : Union[str, Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE__ ) , torch_builtin(SCREAMING_SNAKE_CASE__ ) ) )
self.assertFalse(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE__ ) , gelu_new(SCREAMING_SNAKE_CASE__ ) ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
a_ : Union[str, Any] = get_activation('gelu' )
a_ : str = get_activation('gelu_10' )
a_ : Tuple = torch_builtin(SCREAMING_SNAKE_CASE__ )
a_ : str = geluaa(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(SCREAMING_SNAKE_CASE__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
get_activation('bogus' )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
get_activation(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
a_ : Any = get_activation('gelu' )
a_ : Any = 1
a_ : int = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
a_ : Tuple = acta.a
| 120 | 1 |
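A quick check of the clipping behaviour the gelu_10 test above asserts:
import torch
from transformers.activations import get_activation

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
y = get_activation("gelu_10")(x)
print(y.max().item())  # 10.0: large positive inputs saturate, matching the assertion above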
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
snake_case_ = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self : Tuple , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
snake_case_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = "<unk> UNwanted , running"
snake_case_ = "<unk> unwanted, running"
return input_text, output_text
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_lowerCAmelCase )
snake_case_ = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(_lowerCAmelCase , ["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [0, 4, 8, 7] )
def lowerCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
snake_case_ = TransfoXLTokenizer(lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
def lowerCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
snake_case_ = TransfoXLTokenizer(lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
snake_case_ = TransfoXLTokenizer(lower_case=_lowerCAmelCase )
snake_case_ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
snake_case_ = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(_lowerCAmelCase ) , _lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = len(_lowerCAmelCase )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_lowerCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , "new1" )
| 159 |
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
snake_case_ = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
snake_case_ = [2, 4, 6, 8, 1_0, 1_2]
snake_case_ = 1_0_0
self.assertEqual(kp.calc_profit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , 2_1_0 )
def lowerCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , "max_weight must greater than zero." )
def lowerCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , "Weight can not be negative." )
def lowerCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , "Profit can not be negative." )
def lowerCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , "max_weight must greater than zero." )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.assertRaisesRegex(
_lowerCAmelCase , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
| 159 | 1 |
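Why the first assertion above expects 210: the total weight 2+4+6+8+10+12 = 42 fits under the capacity of 100, so the greedy fill takes every item and the profit is the full sum.
profit = [10, 20, 30, 40, 50, 60]
weight = [2, 4, 6, 8, 10, 12]
assert sum(weight) <= 100  # everything fits
assert sum(profit) == 210  # hence calc_profit returns the full profit sum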
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
A_ : Dict = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def snake_case (UpperCAmelCase__ ) -> Dict:
UpperCamelCase_: Any = test_results.split(' ' )
UpperCamelCase_: Any = 0
UpperCamelCase_: Union[str, Any] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCamelCase_: Dict = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def snake_case (UpperCAmelCase__ ) -> List[str]:
UpperCamelCase_: Dict = {}
UpperCamelCase_: int = None
UpperCamelCase_: int = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , lowercase_ ):
UpperCamelCase_: Any = True
UpperCamelCase_: Optional[Any] = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
UpperCamelCase_: str = line
UpperCamelCase_: List[Any] = False
return failures
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[str] = title
UpperCamelCase_: Optional[Any] = doc_test_results['time_spent'].split(',' )[0]
UpperCamelCase_: str = doc_test_results['success']
UpperCamelCase_: Union[str, Any] = doc_test_results['failures']
UpperCamelCase_: List[str] = self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCamelCase_: int = doc_test_results
@property
def _a ( self ):
UpperCamelCase_: Dict = [self._time_spent]
UpperCamelCase_: int = 0
for time in time_spent:
UpperCamelCase_: Any = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
UpperCamelCase_: Union[str, Any] = [0, 0, time_parts[0]]
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: str = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Tuple = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return f'''{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s'''
@property
def _a ( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _a ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _a ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def category_failures(self):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload(self):
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
            blocks.extend([self.category_failures])
        else:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply() | 362 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Check that the fake head request was indeed called
        mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_for_fast_tokenizer_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Check that the fake head request was indeed called
        mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"]) | 292 | 0 |
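
# For orientation, a minimal sketch of the nested-dict layout the assertions above rely
# on (derived from the asserted structure, not the transformers implementation): the
# empty-string key "" marks the end of a stored word.
class _TrieSketch:
    def __init__(self):
        self.data = {}

    def add(self, word: str):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})  # descend, creating children as needed
        node[""] = 1  # terminal marker, as in the expected dicts above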
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
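
# Usage note: running e.g. `pytest --make-reports=tests tests/` enables the report
# files consumed by CI (the exact report set is defined in transformers.testing_utils
# and is not pinned down here).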
| 103 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(['heb-eng'])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 83 | 0 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
if not numbers:
return 0
if not isinstance(_UpperCAmelCase , (list, tuple) ) or not all(
isinstance(_UpperCAmelCase , _UpperCAmelCase ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
lowercase : List[Any] = numbers[0]
for i in range(1 , len(_UpperCAmelCase ) ):
# update the maximum and minimum subarray products
lowercase : List[Any] = numbers[i]
if number < 0:
lowercase , lowercase : str = min_till_now, max_till_now
lowercase : int = max(_UpperCAmelCase , max_till_now * number )
lowercase : Tuple = min(_UpperCAmelCase , min_till_now * number )
# update the maximum product found till now
lowercase : List[Any] = max(_UpperCAmelCase , _UpperCAmelCase )
return max_prod
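

# A quick sanity check of the routine above (illustrative inputs):
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0  # the zero separates the negatives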
| 53 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
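
# Hypothetical invocation sketch (the checkpoint download and the local image path are
# assumptions for illustration):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(image=Image.open("photo.jpg"), question="What is in the image?"))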
| 53 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used as a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) | 189 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/s2t-wav2vec2-large-en-de''': (
        '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        ) | 189 | 1 |
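# A minimal usage sketch (values are illustrative): thanks to `attribute_map`, the
# generic names resolve to the decoder-specific attributes.
#   config = Speech2Text2Config(vocab_size=10000, d_model=256)
#   assert config.num_attention_heads == config.decoder_attention_heads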
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == """group""",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, """w""", encoding="""utf-8""") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="""|""",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1]), """w2v_path""": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
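
    # Example invocation (script name and all paths below are placeholders):
    #   python convert_unispeech_checkpoint.py \
    #       --checkpoint_path /path/to/unispeech.pt \
    #       --pytorch_dump_folder_path ./unispeech-hf \
    #       --dict_path /path/to/dict.ltr.txt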
| 337 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = '''perceiver'''

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
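
# A minimal usage sketch (values are illustrative):
#   config = PerceiverConfig(num_latents=128, d_latents=512)
#   config.num_self_attends_per_block  # -> 26, the default defined above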
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""inputs""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [""" """.join(["""a"""]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("""input_ids""")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("""pixel_values""")
            return inputs
        else:
            raise ValueError(
                """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 337 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"""height""": 20, """width""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""")

    image = Image.open(dataset[0]["""file"""])
    map = Image.open(dataset[1]["""file"""])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""")

    image1 = Image.open(ds[0]["""file"""])
    map1 = Image.open(ds[1]["""file"""])
    image2 = Image.open(ds[2]["""file"""])
    map2 = Image.open(ds[3]["""file"""])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """center_crop"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 20, """width""": 20})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : Dict = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 1_50 )
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
| 25 |
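# --- Illustrative aside (not part of the test file above) ---
# A minimal stand-alone sketch of what the segmentation-map tests exercise. The
# tests are written against a generic `image_processing_class`; BeitImageProcessor
# is assumed here only as one concrete processor that accepts segmentation maps
# (argument names per recent transformers versions). Needs numpy, Pillow, torch.
import numpy as np
from PIL import Image
from transformers import BeitImageProcessor

processor = BeitImageProcessor(do_reduce_labels=False)
image = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
seg_map = Image.fromarray(np.random.randint(0, 150, (64, 64), dtype=np.uint8))

encoding = processor(image, seg_map, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, crop_h, crop_w)
print(encoding["labels"].shape, encoding["labels"].dtype)  # (1, crop_h, crop_w), torch.int64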
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
_DESCRIPTION = "\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    \"accuracy\": Accuracy\n    \"f1\": F1 score\n    \"precision\": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0, 'f1': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 47 | 0 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example function: f(x) = x^3 - 2x - 5."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 198 |
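# --- Illustrative aside ---
# Quick sanity check of the secant iteration above (assumes `intersection` and
# `math` from the module above) against a function whose root is known in
# closed form: x**2 - 2 has the root sqrt(2).
print(intersection(lambda x: x**2 - 2, 1.0, 2.0))  # ~1.4142135
print(math.sqrt(2))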
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"""{temp.data}""")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set the first element as the head, then append the rest in order
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the list in reverse order, recursively."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 198 | 1 |
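# --- Illustrative aside ---
# print_reverse above recurses once per node, so it is bounded by Python's
# default recursion limit (~1000 frames). A stack-based sketch with the same
# output that handles arbitrarily long lists (assumes `Node` from above):
def print_reverse_iterative(head_node: Node) -> None:
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())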
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem) composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = R'\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 321 |
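# --- Illustrative aside ---
# A hedged usage sketch for the model above: run a forward pass end to end.
# The checkpoint id mirrors the docstring constants; any RegNet checkpoint
# works. Requires TensorFlow, Pillow, numpy and transformers.
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetModel

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, 1088, 7, 7), as in _EXPECTED_OUTPUT_SHAPE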
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
    pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 151 | 0 |
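# --- Illustrative aside ---
# The grid geometry used by image_grid/generate_images above: _rows is
# floor(sqrt(n)), so images_num should be a perfect square (or at least
# divisible by _rows) for the rows*cols == len(imgs) check to pass.
# Assumes `image_grid` from the script above is in scope.
from PIL import Image

tiles = [Image.new("RGB", (64, 64), color=(40 * i, 100, 150)) for i in range(4)]
demo_grid = image_grid(tiles, rows=2, cols=2)
print(demo_grid.size)  # (128, 128)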
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n        super().__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n        self.decoder.bias = self.bias\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        reference_code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(reference_code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample) | 356 |
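# --- Illustrative aside ---
# For context, a sketch of the comment convention these tests exercise: a class
# marked "Copied from" must stay identical (after the optional `with A->B`
# rename) to its source, or `check_copies.is_copy_consistent` reports and can
# overwrite it. The class name below is hypothetical and its body is elided.
import torch.nn as nn

# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MyModel
class MyModelLMPredictionHead(nn.Module):
    ...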
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 242 | 0 |
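# --- Illustrative aside ---
# Migration implied by the deprecation warning above: both names accept the
# same kwargs today, but only the image processor survives Transformers v5.
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()      # preferred going forward
# processor = DeformableDetrFeatureExtractor()  # deprecated alias, same behaviour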
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''Salesforce/codegen-350M-mono''': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast CodeGen tokenizer (backed by HuggingFace's *tokenizers* library), based on byte-level BPE.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 337 |
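# --- Illustrative aside ---
# Hedged usage sketch of the decode() extension above: truncate_before_pattern
# stops a completion at the first match of any given pattern (and, independently,
# at a second top-level `print`/`def`), so generations end at natural boundaries.
# Downloads the Salesforce/codegen-350M-mono tokenizer on first use.
from transformers import CodeGenTokenizerFast

tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok("def add(a, b):\n    return a + b\n# example call\nprint(add(1, 2))")["input_ids"]
print(tok.decode(ids, truncate_before_pattern=[r"^#", r"^print"]))
# -> "def add(a, b):\n    return a + b\n"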
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects "marked" methods and dispatches key presses to them.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected method if it exists"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 337 | 1 |
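# --- Illustrative aside ---
# How the pieces above compose (this mirrors accelerate's interactive menus):
# methods marked with a key code are collected by the KeyHandler metaclass,
# and handle_input() dispatches on the next keypress. A minimal sketch:
class Menu(metaclass=KeyHandler):
    @mark(ord("q"))
    def quit(self):
        return "quit"

menu = Menu()
# menu.handle_input() blocks on get_character(); pressing "q" returns "quit".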
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 195 |
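# --- Illustrative aside ---
# The ONNX Runtime knobs the GPU tests above depend on, shown in isolation
# (values are the test's own; tune gpu_mem_limit to your hardware).
import onnxruntime as ort

sess_options = ort.SessionOptions()
sess_options.enable_mem_pattern = False
provider = (
    "CUDAExecutionProvider",
    {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
)
# Pass as: OnnxStableDiffusionUpscalePipeline.from_pretrained(..., provider=provider, sess_options=sess_options)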
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a MobileViT image processor (resize to shortest edge, center crop, rescale, flip channel order).
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__SCREAMING_SNAKE_CASE = [self.flip_channel_order(image=UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = target_sizes.numpy()
__SCREAMING_SNAKE_CASE = []
for idx in range(len(UpperCAmelCase__ ) ):
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = logits.argmax(dim=1 )
__SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
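# Usage sketch (added for illustration; not part of the original module). Assumes a local
# RGB image file — any Pillow-readable image works. Calling the processor routes through
# `preprocess`, so the defaults above (shortest-edge-224 resize, 256x256 center crop,
# rescale, BGR channel flip) are all applied.
if __name__ == "__main__":
    from PIL import Image

    processor = MobileViTImageProcessor()
    image = Image.open("cat.png").convert("RGB")  # hypothetical local file
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 256, 256)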
| 195 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the idealised Casimir relation F = (ℏ * c * π² * A) / (240 * d⁴) for whichever
    one of the three quantities is passed in as 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
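# Worked example (added for illustration): the force between two 4 cm^2 plates one
# micrometre apart, then the distance recovered from that force — the calls round-trip.
if __name__ == "__main__":
    res = casimir_force(force=0, area=4e-4, distance=1e-6)
    print(res)  # {'force': ~5.2e-7} newtons of attraction
    print(casimir_force(force=res["force"], area=4e-4, distance=0))  # {'distance': ~1e-6}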
| 141 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 141 | 1 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __SCREAMING_SNAKE_CASE (lowercase__ ):
"""simple docstring"""
def UpperCamelCase__ ( self : int ):
_a = tempfile.mkdtemp()
_a = 8
# DPR tok
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_a = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
_a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_a = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_a = {"unk_token": "<unk>"}
_a = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_a = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def UpperCamelCase__ ( self : Optional[Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCamelCase__ ( self : Tuple ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCamelCase__ ( self : str ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
_a = dataset
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCamelCase__ ( self : Optional[int] , __a : Dict ):
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
_a = os.path.join(self.tmpdirname , "dataset" )
_a = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , )
return retriever
def UpperCamelCase__ ( self : List[str] ):
_a = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
_a = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
_a = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
_a = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(lowercase_ , open(lowercase_ , "wb" ) )
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
_a = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCamelCase__ ( self : Tuple ):
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : int ):
_a = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
_a = self.get_dummy_dataset()
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self : int ):
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self : List[str] ):
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self : Dict ):
_a = 1
_a = self.get_dummy_legacy_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , lowercase_ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
_a = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self : List[Any] ):
import torch
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = [[5, 7], [10, 11]]
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
_a = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , np.ndarray )
_a = retriever(
lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors="pt" , )
_a = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self : Dict ):
_a = self.get_dpr_ctx_encoder_tokenizer()
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
retriever.set_ctx_encoder_tokenizer(lowercase_ )
_a = [[5, 7], [10, 11]]
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
self.assertEqual(
len(lowercase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , lowercase_ ) # check for doc token related keys in dictionary.
| 351 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor with a LayoutXLM tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
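# Usage sketch (added for illustration; assumes the public "microsoft/layoutxlm-base"
# checkpoint, a local document image, and pytesseract installed so the default
# apply_ocr=True path can extract words and boxes from the image itself):
#
#     from PIL import Image
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())  # dict_keys(['input_ids', 'attention_mask', 'bbox', 'image'])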
| 346 | 0 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input("Enter the last number (nth term) of the P-Series"))
power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
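# Worked example (added for illustration): the first five terms with p = 2 are
# p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']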
| 120 |
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
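# Usage sketch (added for illustration): build a small list, splice into the middle, walk it.
if __name__ == "__main__":
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    linked_list.insert_at_position(position=2, value=9)
    print(linked_list)  # 1 9 2 3
    print(9 in linked_list)  # True
    linked_list.delete_value(9)
    print(list(linked_list))  # [1, 2, 3]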
| 120 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
 | 365 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token: str, num_runs: int = 7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token: str):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword defined by `get_artifacts_links` in `get_ci_error_statistics.py`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Return the text content of each downloaded artifact, keyed by artifact and file name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
 | 64 | 0 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    """Placeholder that raises a helpful error when the `onnx` backend is missing."""

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 48 |
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        # store a copy: the board itself is mutated again while backtracking
        solution.append([board_row[:] for board_row in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
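# Worked example (added for illustration): the 4x4 board has exactly two solutions.
# `solve()` prints each board it finds and appends it to the global `solution` list,
# so reset that list before counting.
solution.clear()
small_board = [[0 for _ in range(4)] for _ in range(4)]
solve(small_board, 0)
print("The total no. of 4-queens solutions:", len(solution))  # 2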
| 292 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__ ( snake_case__ ):
"""simple docstring"""
def a_ ( self ):
snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__snake_case , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__snake_case , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__snake_case , '''num_encoder_blocks''' ) )
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=6_4 , __snake_case=3 , __snake_case=4 , __snake_case=[2, 2, 2, 2] , __snake_case=[8, 4, 2, 1] , __snake_case=[1_6, 3_2, 6_4, 1_2_8] , __snake_case=[1, 4, 8, 1_6] , __snake_case=[1, 2, 4, 8] , __snake_case=True , __snake_case=True , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.02 , __snake_case=3 , __snake_case=None , ):
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = num_channels
snake_case = num_encoder_blocks
snake_case = sr_ratios
snake_case = depths
snake_case = hidden_sizes
snake_case = downsampling_rates
snake_case = num_attention_heads
snake_case = is_training
snake_case = use_labels
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = num_labels
snake_case = scope
def a_ ( self ):
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels
def a_ ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = SegformerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
snake_case = snake_case = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = SegformerForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = 1
snake_case = SegformerForSemanticSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__snake_case )
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def a_ ( self ):
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a_ ( self ):
snake_case = SegformerModelTester(self )
snake_case = SegformerConfigTester(self , config_class=__snake_case )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__snake_case )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def a_ ( self ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def a_ ( self ):
pass
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__snake_case )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
for model_class in self.all_model_classes:
snake_case = True
snake_case = False
snake_case = True
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(__snake_case , __snake_case ) )
snake_case = outputs.attentions
snake_case = sum(self.model_tester.depths )
self.assertEqual(len(__snake_case ) , __snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case = True
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(__snake_case , __snake_case ) )
snake_case = outputs.attentions
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first attentions (first block, first layer)
snake_case = (self.model_tester.image_size // 4) ** 2
snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case = (self.model_tester.image_size // 3_2) ** 2
snake_case = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
snake_case = len(__snake_case )
# Check attention is always last and order is fine
snake_case = True
snake_case = True
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
snake_case = outputs.attentions
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first attentions (first block, first layer)
snake_case = (self.model_tester.image_size // 4) ** 2
snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def a_ ( self ):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case ):
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(__snake_case , __snake_case ) )
snake_case = outputs.hidden_states
snake_case = self.model_tester.num_encoder_blocks
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
if not self.model_tester.is_training:
return
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
for model_class in self.all_model_classes:
if model_class in get_values(__snake_case ):
continue
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.train()
snake_case = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
snake_case = model(**__snake_case ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a_ ( self ):
pass
@slow
def a_ ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = SegformerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def a_ ( self ):
# only resize + normalize
snake_case = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
snake_case = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__snake_case )
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors='''pt''' )
snake_case = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
snake_case = model(__snake_case )
snake_case = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , __snake_case )
snake_case = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def a_ ( self ):
# only resize + normalize
snake_case = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
snake_case = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__snake_case )
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors='''pt''' )
snake_case = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
snake_case = model(__snake_case )
snake_case = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , __snake_case )
snake_case = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-1 ) )
@slow
def a_ ( self ):
# only resize + normalize
snake_case = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
snake_case = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__snake_case )
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors='''pt''' )
snake_case = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
snake_case = model(__snake_case )
snake_case = outputs.logits.detach().cpu()
snake_case = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_0_0, 3_0_0)] )
snake_case = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , __snake_case )
snake_case = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
snake_case = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , __snake_case )
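# Usage sketch (added for illustration; assumes the public checkpoint exercised by the
# tests above is available): the same pipeline outside the unittest harness.
#
#     from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#     processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#     model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(500, 300)])[0]
#     print(seg_map.shape)  # torch.Size([500, 300])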
| 360 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences, one per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 213 | 0 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse via the iterative extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
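# Worked example (added for illustration): 7 * 15 = 105 = 4 * 26 + 1,
# so 15 is the inverse of 7 modulo 26.
if __name__ == "__main__":
    print(find_mod_inverse(7, 26))  # 15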
| 53 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """Donut-based tool that answers a question about a document image."""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)

        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
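# Hypothetical usage sketch (added, not from the original file): PipelineTool
# instances are callable and lazily load the default checkpoint; the import
# path and "invoice.png" are assumptions.
#
#   from PIL import Image
#   from transformers.tools import DocumentQuestionAnsweringTool
#
#   tool = DocumentQuestionAnsweringTool()
#   document = Image.open("invoice.png")
#   print(tool(document, "What is the total amount?"))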
| 53 | 1 |
"""simple docstring"""
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal):
    """Find the shortest path between `start` and `goal` nodes with BFS."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target):
    """Find the shortest path distance (number of edges) between `start` and `target` with BFS."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
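    # Edge-case demo (added): "Z" is a made-up node absent from demo_graph.
    print(bfs_shortest_path(demo_graph, "G", "Z"))  # [] -- no such node
    print(bfs_shortest_path_distance(demo_graph, "G", "Z"))  # -1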
| 352 | """simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 154 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Dict:
"""simple docstring"""
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowercase : Tuple = '''lm_head'''
lowercase : Optional[int] = getattr(_UpperCamelCase, _UpperCamelCase )
if weight_type is not None:
lowercase : Any = getattr(_UpperCamelCase, _UpperCamelCase ).shape
else:
lowercase : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase : Any = value
elif weight_type == "weight_g":
lowercase : List[Any] = value
elif weight_type == "weight_v":
lowercase : List[Any] = value
elif weight_type == "bias":
lowercase : int = value
else:
lowercase : str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : int = []
lowercase : Optional[int] = fairseq_model.state_dict()
lowercase : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowercase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, hf_model.config.feat_extract_norm == '''group''', )
lowercase : str = True
else:
for key, mapped_key in MAPPING.items():
lowercase : Optional[int] = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Dict = True
if "*" in mapped_key:
lowercase : Dict = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
lowercase : List[Any] = mapped_key.replace('''*''', _UpperCamelCase )
if "weight_g" in name:
lowercase : str = '''weight_g'''
elif "weight_v" in name:
lowercase : str = '''weight_v'''
elif "bias" in name:
lowercase : List[Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[Any] = '''weight'''
else:
lowercase : Any = None
set_recursively(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Tuple:
"""simple docstring"""
lowercase : Optional[int] = full_name.split('''conv_layers.''' )[-1]
lowercase : Optional[Any] = name.split('''.''' )
lowercase : Optional[Any] = int(items[0] )
lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None, _UpperCamelCase=None, _UpperCamelCase=True ) ->List[str]:
"""simple docstring"""
if config_path is not None:
lowercase : Optional[Any] = UniSpeechConfig.from_pretrained(_UpperCamelCase )
else:
lowercase : str = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase : Tuple = Dictionary.load_from_json(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase : Dict = target_dict.pad_index
lowercase : int = target_dict.bos_index
lowercase : Tuple = target_dict.eos_index
lowercase : Tuple = len(target_dict.symbols )
lowercase : Dict = os.path.join(_UpperCamelCase, '''vocab.json''' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
lowercase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : int = 42
lowercase : Any = 43
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as vocab_handle:
json.dump(_UpperCamelCase, _UpperCamelCase )
lowercase : Any = WavaVecaPhonemeCTCTokenizer(
_UpperCamelCase, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=_UpperCamelCase, )
lowercase : List[Any] = True if config.feat_extract_norm == '''layer''' else False
lowercase : int = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=_UpperCamelCase, return_attention_mask=_UpperCamelCase, )
lowercase : str = WavaVecaProcessor(feature_extractor=_UpperCamelCase, tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
lowercase : int = UniSpeechForCTC(_UpperCamelCase )
else:
lowercase : Tuple = UniSpeechForPreTraining(_UpperCamelCase )
if is_finetuned:
lowercase , lowercase , lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
lowercase , lowercase , lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowercase : List[str] = model[0].eval()
recursively_load_weights(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
hf_unispeech.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
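# Example command line (added; file names are illustrative and the script
# name is an assumption):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech_base.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path ./dict.ltr.txt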
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
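# Hedged usage sketch (added, not from the original file): the checkpoint
# name and the call pattern are assumptions based on the public diffusers
# API for the pipeline re-exported above.
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   )
#   frames = pipe("an astronaut riding a horse").frames  # list of np.ndarray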
| 337 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : Dict = Path("""data_bin""" )
UpperCAmelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowercase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowercase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowercase )
UpperCAmelCase : List[str] = xmod.model.encoder.sentence_encoder
UpperCAmelCase : Tuple = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCAmelCase : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowercase )
UpperCAmelCase : str = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase : int = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase : Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase : List[str] = model.roberta.encoder.layer[i]
UpperCAmelCase : Optional[Any] = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase : Optional[Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
UpperCAmelCase : List[Any] = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase : Optional[int] = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase : Any = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase : Optional[int] = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase : int = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
UpperCAmelCase : Any = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase : List[str] = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase : int = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase : Tuple = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
UpperCAmelCase : List[str] = xmod_layer.fca.weight
UpperCAmelCase : str = xmod_layer.fca.bias
# output
UpperCAmelCase : Any = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
UpperCAmelCase : Dict = xmod_layer.fca.weight
UpperCAmelCase : Dict = xmod_layer.fca.bias
UpperCAmelCase : Any = xmod_layer.final_layer_norm.weight
UpperCAmelCase : Union[str, Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase : str = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase : List[str] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase : List[Any] = bert_output.adapter_modules[lang_code]
UpperCAmelCase : Dict = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase : Any = from_adapter.fca.weight
UpperCAmelCase : int = from_adapter.fca.bias
UpperCAmelCase : Dict = from_adapter.fca.weight
UpperCAmelCase : Dict = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase : Tuple = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase : Dict = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase : str = xmod.model.encoder.lm_head.weight
UpperCAmelCase : str = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase : Any = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
UpperCAmelCase : Optional[int] = model(_lowercase )[0]
if classification_head:
UpperCAmelCase : List[Any] = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowercase ) )
else:
UpperCAmelCase : Optional[Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCAmelCase : Dict = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
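# Example command line (added; the checkpoint path is illustrative and
# the script name is an assumption):
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-hf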
| 338 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
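    # Extra classic cases (added): a few known answers for this DP.
    assert match_pattern("aa", "a*") is True
    assert match_pattern("ab", ".*") is True
    assert match_pattern("mississippi", "mis*is*p*.") is False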
| 338 | 1 |
'''simple docstring'''
from manim import *
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowercase__ : Optional[int] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase__ : Any = [mem.copy() for i in range(6 )]
lowercase__ : Any = [mem.copy() for i in range(6 )]
lowercase__ : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Union[str, Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : str = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : str = Text('''CPU''' , font_size=24 )
lowercase__ : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowercase__ : Dict = [mem.copy() for i in range(4 )]
lowercase__ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Tuple = Text('''GPU''' , font_size=24 )
lowercase__ : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowercase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowercase__ : Union[str, Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Tuple = Text('''Model''' , font_size=24 )
lowercase__ : Dict = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowercase__ : Dict = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowercase__ : Union[str, Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
cpu_targs.append(__lowerCAmelCase )
lowercase__ : int = [mem.copy() for i in range(6 )]
lowercase__ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Optional[int] = Text('''Loaded Checkpoint''' , font_size=24 )
lowercase__ : Dict = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , aligned_edge=__lowerCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowercase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ : List[str] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : List[Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowercase__ : int = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) , Write(__lowerCAmelCase ) )
self.play(Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
lowercase__ : int = []
lowercase__ : Optional[int] = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase__ : int = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
first_animations.append(GrowFromCenter(__lowerCAmelCase , run_time=1 ) )
lowercase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(*__lowerCAmelCase )
self.wait()
| 198 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 ):
lowercase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : List[Any] = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : Dict = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
UpperCAmelCase , padding='''longest''' , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : str = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
lowercase__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCAmelCase ) == "1":
lowercase__ : Optional[int] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowercase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
lowercase__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : int = config['''lr''']
lowercase__ : Optional[int] = int(config['''num_epochs'''] )
lowercase__ : Optional[Any] = int(config['''seed'''] )
lowercase__ : int = int(config['''batch_size'''] )
set_seed(UpperCAmelCase )
lowercase__ , lowercase__ : str = get_dataloaders(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase )
# Instantiate scheduler
lowercase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowercase__ : Optional[Any] = os.path.split(UpperCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCAmelCase , UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowercase__ : str = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowercase__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
lowercase__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(UpperCAmelCase ),
'''epoch''': epoch,
} , step=UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __UpperCamelCase ( ):
lowercase__ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCAmelCase , default=UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=UpperCAmelCase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
lowercase__ : str = parser.parse_args()
lowercase__ : Tuple = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
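# Typical launch commands (added; illustrative, assuming the script is
# saved as tracking_example.py):
#
#   accelerate launch tracking_example.py --with_tracking --project_dir logs
#   python tracking_example.py --cpu   # quick single-process smoke test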
| 198 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = (16, 32, 96, 2_56)
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
_snake_case : int = []
for i in range(len(self.block_out_channels ) - 1 ):
_snake_case : int = self.block_out_channels[i]
_snake_case : Tuple = self.block_out_channels[i + 1]
_snake_case : Dict = nn.Conv(
a_, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(a_ )
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(a_ )
_snake_case : Any = blocks
_snake_case : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self: Optional[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.conv_in(a_ )
_snake_case : Optional[int] = nn.silu(a_ )
for block in self.blocks:
_snake_case : Tuple = block(a_ )
_snake_case : int = nn.silu(a_ )
_snake_case : Optional[int] = self.conv_out(a_ )
return embedding
@flax_register_to_config
class lowercase( nn.Module , __a , __a ):
'''simple docstring'''
lowercase__ = 32
lowercase__ = 4
lowercase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase__ = False
lowercase__ = (3_20, 6_40, 12_80, 12_80)
lowercase__ = 2
lowercase__ = 8
lowercase__ = None
lowercase__ = 12_80
lowercase__ = 0.0
lowercase__ = False
lowercase__ = jnp.floataa
lowercase__ = True
lowercase__ = 0
lowercase__ = "rgb"
lowercase__ = (16, 32, 96, 2_56)
def UpperCamelCase_ ( self: int, a_: jax.random.KeyArray ):
'''simple docstring'''
_snake_case : str = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case : Optional[Any] = jnp.zeros(a_, dtype=jnp.floataa )
_snake_case : List[str] = jnp.ones((1,), dtype=jnp.intaa )
_snake_case : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
_snake_case : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
_snake_case : Optional[int] = jnp.zeros(a_, dtype=jnp.floataa )
_snake_case , _snake_case : Tuple = jax.random.split(a_ )
_snake_case : str = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(a_, a_, a_, a_, a_ )["params"]
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.block_out_channels
_snake_case : Optional[int] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case : int = self.num_attention_heads or self.attention_head_dim
# input
_snake_case : Union[str, Any] = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_snake_case : int = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
_snake_case : Any = FlaxTimestepEmbedding(a_, dtype=self.dtype )
_snake_case : Optional[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
_snake_case : List[str] = self.only_cross_attention
if isinstance(a_, a_ ):
_snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(a_, a_ ):
_snake_case : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case : List[str] = []
_snake_case : Tuple = []
_snake_case : int = block_out_channels[0]
_snake_case : Optional[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case : List[Any] = output_channel
_snake_case : Any = block_out_channels[i]
_snake_case : List[str] = i == len(a_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=a_, out_channels=a_, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
_snake_case : List[Any] = FlaxDownBlockaD(
in_channels=a_, out_channels=a_, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(a_ )
for _ in range(self.layers_per_block ):
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
if not is_final_block:
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
_snake_case : str = down_blocks
_snake_case : Union[str, Any] = controlnet_down_blocks
# mid
_snake_case : Tuple = block_out_channels[-1]
_snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=a_, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
_snake_case : Tuple = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self: str, a_: Any, a_: Tuple, a_: Any, a_: int, a_: float = 1.0, a_: bool = True, a_: bool = False, ):
'''simple docstring'''
_snake_case : Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_snake_case : List[Any] = jnp.flip(a_, axis=1 )
# 1. time
if not isinstance(a_, jnp.ndarray ):
_snake_case : Any = jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(a_, jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
_snake_case : List[str] = jnp.expand_dims(a_, 0 )
_snake_case : List[str] = self.time_proj(a_ )
_snake_case : str = self.time_embedding(a_ )
# 2. pre-process
_snake_case : List[str] = jnp.transpose(a_, (0, 2, 3, 1) )
_snake_case : List[Any] = self.conv_in(a_ )
_snake_case : Union[str, Any] = jnp.transpose(a_, (0, 2, 3, 1) )
_snake_case : Any = self.controlnet_cond_embedding(a_ )
sample += controlnet_cond
# 3. down
_snake_case : List[str] = (sample,)
for down_block in self.down_blocks:
if isinstance(a_, a_ ):
_snake_case , _snake_case : Optional[Any] = down_block(a_, a_, a_, deterministic=not train )
else:
_snake_case , _snake_case : Dict = down_block(a_, a_, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_snake_case : Dict = self.mid_block(a_, a_, a_, deterministic=not train )
# 5. contronet blocks
_snake_case : Tuple = ()
for down_block_res_sample, controlnet_block in zip(a_, self.controlnet_down_blocks ):
_snake_case : Any = controlnet_block(a_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_snake_case : List[Any] = controlnet_down_block_res_samples
_snake_case : int = self.controlnet_mid_block(a_ )
# 6. scaling
_snake_case : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a_, mid_block_res_sample=a_ )
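# Hedged usage sketch (added): upstream, the model defined above is
# diffusers' FlaxControlNetModel (identifiers here are obfuscated); the
# checkpoint name and the (model, params) return pair are assumptions
# based on the public Flax API.
#
#   import jax.numpy as jnp
#   from diffusers import FlaxControlNetModel
#
#   controlnet, params = FlaxControlNetModel.from_pretrained(
#       "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
#   )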
| 132 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase( __a , __a ):
'''simple docstring'''
lowercase__ = "swin"
lowercase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Any, a_: List[str]=224, a_: List[Any]=4, a_: List[Any]=3, a_: Dict=96, a_: List[str]=[2, 2, 6, 2], a_: int=[3, 6, 12, 24], a_: int=7, a_: str=4.0, a_: Optional[Any]=True, a_: Dict=0.0, a_: List[Any]=0.0, a_: List[str]=0.1, a_: Union[str, Any]="gelu", a_: Dict=False, a_: Union[str, Any]=0.02, a_: Optional[int]=1E-5, a_: Optional[int]=32, a_: Tuple=None, a_: Union[str, Any]=None, **a_: Any, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : Any = image_size
_snake_case : List[Any] = patch_size
_snake_case : Tuple = num_channels
_snake_case : str = embed_dim
_snake_case : Union[str, Any] = depths
_snake_case : int = len(a_ )
_snake_case : Union[str, Any] = num_heads
_snake_case : List[str] = window_size
_snake_case : str = mlp_ratio
_snake_case : Union[str, Any] = qkv_bias
_snake_case : Dict = hidden_dropout_prob
_snake_case : str = attention_probs_dropout_prob
_snake_case : Union[str, Any] = drop_path_rate
_snake_case : Optional[int] = hidden_act
_snake_case : str = use_absolute_embeddings
_snake_case : Tuple = layer_norm_eps
_snake_case : List[Any] = initializer_range
_snake_case : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_snake_case : Any = int(embed_dim * 2 ** (len(a_ ) - 1) )
_snake_case : Any = ["""stem"""] + [f"stage{idx}" for idx in range(1, len(a_ ) + 1 )]
_snake_case , _snake_case : List[str] = get_aligned_output_features_output_indices(
out_features=a_, out_indices=a_, stage_names=self.stage_names )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = version.parse("1.11" )
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return 1E-4
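# Minimal sketch (added): upstream this config class is transformers'
# SwinConfig (identifiers here are obfuscated). The derived hidden_size is
# the channel dimension after the last stage.
#
#   from transformers import SwinConfig
#
#   cfg = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   assert cfg.hidden_size == 96 * 2 ** 3  # 768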
| 132 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
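# Hedged usage sketch (added; this file only works inside the transformers
# package, so the snippet below is illustrative and the checkpoint name is
# an assumption):
#
#   import numpy as np
#   from transformers import WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
#   print(inputs.input_features.shape)  # torch.Size([1, 80, 3000])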
| 44 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
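    # Cross-check (added): all three variants agree; for array=[1, 2, 5]
    # and target=5 there are 9 ordered combinations.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )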
| 242 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
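    # Quick check (added): the blinker oscillates with period 2.
    assert new_generation(new_generation(BLINKER)) == BLINKER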
| 185 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
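# A minimal, self-contained sketch of the same k-NN vote on toy data
# (hypothetical points, not drawn from the iris dataset above):
def _knn_vote_demo() -> int:
    train = [((0.0, 0.0), 0), ((1.0, 1.0), 0), ((5.0, 5.0), 1)]
    point = (0.5, 0.5)
    # Sort (distance, label) pairs by Euclidean distance to the query point.
    dists = sorted(
        (sum((a - b) ** 2 for a, b in zip(x, point)) ** 0.5, label)
        for x, label in train
    )
    # Majority label among the k=3 nearest neighbours -> class 0.
    return Counter(label for _, label in dists[:3]).most_common(1)[0][0]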
| 195 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : str = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ):
super().__init__(**snake_case )
lowercase = size if size is not None else {'shortest_edge': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case , param_name='size' , default_to_square=snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase = [convert_to_rgb(snake_case ) for image in images]
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
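# A hedged usage sketch (illustrative names; the processor above applies the
# standard CLIP order resize -> center-crop -> rescale -> normalize):
#   processor = ImageProcessor(size={"shortest_edge": 224})
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)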
| 195 | 1 |
from __future__ import annotations
from math import gcd
def A_ ( A__ , A__ = 2 , A__ = 1 , A__ = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(A__ , A__ , A__ ) -> int:
return (pow(A__ , 2 ) + step) % modulus
for _ in range(A__ ):
# These track the position within the cycle detection logic.
a__ : Dict = seed
a__ : Optional[int] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
a__ : Union[str, Any] = rand_fn(A__ , A__ , A__ )
a__ : str = rand_fn(A__ , A__ , A__ )
a__ : int = rand_fn(A__ , A__ , A__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
a__ : str = gcd(hare - tortoise , A__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's "optimized" variant does.
a__ : Union[str, Any] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
lowercase : Optional[Any] = parser.parse_args()
lowercase : Optional[int] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
lowercase : Any = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 225 |
from __future__ import annotations
from collections.abc import Callable
def A_ ( A__ , A__ , A__ , A__ = 100 , ) -> float:
a__ : Dict = x_start
a__ : Any = fnc(A__ )
a__ : Optional[int] = 0.0
for _ in range(A__ ):
# Approximate each small segment of the curve as linear and solve
# for the trapezoidal area
a__ : Union[str, Any] = (x_end - x_start) / steps + xa
a__ : str = fnc(A__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
a__ : Optional[Any] = xa
a__ : Optional[int] = fxa
return area
if __name__ == "__main__":
def A_ ( A__ ) -> List[str]:
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
lowercase : Union[str, Any] = 1_0
while i <= 1_0_0_0_0_0:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
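# For reference, the exact value follows from the odd term vanishing by
# symmetry: the integral of x**3 + x**2 from -5 to 5 equals 250 / 3 ~ 83.33,
# so the printed approximations should converge towards 83.33.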
| 225 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCamelCase__: str = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> List[str]:
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
UpperCAmelCase : List[Any] = [image]
UpperCAmelCase : str = [trans(img.convert('''RGB''' ) ) for img in image]
UpperCAmelCase : List[Any] = torch.stack(_lowerCAmelCase )
return image
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : List[Any] ) -> List[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase : Union[str, Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__snake_case , scheduler=__snake_case )
def A ( self : Tuple , __snake_case : Union[str, Any] ) -> str:
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def A ( self : List[str] , __snake_case : List[str] , __snake_case : Any , __snake_case : List[Any] ) -> Optional[Any]:
# get the original timestep using init_timestep
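# e.g. with num_inference_steps=50 and strength=0.8 this keeps only the
# final 40 of the 50 scheduler timesteps (init_timestep=40, t_start=10).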
UpperCAmelCase : Optional[int] = min(int(num_inference_steps * strength ) , __snake_case )
UpperCAmelCase : Tuple = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : str=None ) -> List[Any]:
if not isinstance(__snake_case , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__snake_case )}""" )
UpperCAmelCase : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase : Optional[int] = init_latents.shape
UpperCAmelCase : Tuple = randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case )
# get latents
print('''add noise to latents at timestep''' , __snake_case )
UpperCAmelCase : List[Any] = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
UpperCAmelCase : Any = init_latents
return latents
@torch.no_grad()
def __call__( self : Union[str, Any] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] = None , __snake_case : float = 0.8 , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : float = 0.0 , __snake_case : int = 50 , __snake_case : Optional[bool] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(__snake_case )
# 2. Preprocess image
UpperCAmelCase : int = preprocess(__snake_case )
# 3. set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.get_timesteps(__snake_case , __snake_case , self.device )
UpperCAmelCase : List[Any] = timesteps[:1].repeat(__snake_case )
# 4. Prepare latent variables
UpperCAmelCase : Union[str, Any] = self.prepare_latents(__snake_case , __snake_case , __snake_case , self.unet.dtype , self.device , __snake_case )
UpperCAmelCase : Dict = latents
# 5. Denoising loop
for t in self.progress_bar(__snake_case ):
# 1. predict noise model_output
UpperCAmelCase : List[Any] = self.unet(__snake_case , __snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase : Any = self.scheduler.step(
__snake_case , __snake_case , __snake_case , eta=__snake_case , use_clipped_model_output=__snake_case , generator=__snake_case , ).prev_sample
UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase : Dict = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__snake_case )
| 23 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCAmelCase__ = 3
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = remove_space
UpperCAmelCase__ = keep_accents
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
UpperCAmelCase__ = jieba
UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
return state
def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
if self.remove_space:
UpperCAmelCase__ = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase__ = inputs
UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase )
UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
UpperCAmelCase__ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase )
UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
UpperCAmelCase__ = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase__ = cur_pieces[1:]
else:
UpperCAmelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ):
"""simple docstring"""
return self.sp_model.IdToPiece(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase )
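# Undo the pre-tokenization mapping built with ``str.maketrans`` in
# ``__init__``: strip the literal spaces introduced between pieces, then
# restore "\u2582" to a space and "\u2583" to a newline.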
UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 346 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=1_8 , UpperCAmelCase_ : Tuple=3_0 , UpperCAmelCase_ : Dict=4_0_0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , ):
"""simple docstring"""
a : Optional[int] = size if size is not None else {'height': 1_8, 'width': 1_8}
a : str = parent
a : int = batch_size
a : Dict = num_channels
a : Optional[int] = image_size
a : Any = min_resolution
a : Tuple = max_resolution
a : Dict = do_resize
a : Optional[Any] = size
a : Tuple = do_normalize
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
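# (each cluster is an (R, G, B) centroid; ImageGPT quantizes every pixel
# to its nearest centroid to form a discrete colour vocabulary)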
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Dict = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = ImageGPTImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , 'clusters'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'size'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_normalize'))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8})
a : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2})
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
a : Tuple = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key]))
else:
self.assertEqual(obj[key] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Any = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
a : str = os.path.join(UpperCAmelCase_ , 'image_processor.json')
image_processor_first.to_json_file(UpperCAmelCase_)
a : Optional[Any] = self.image_processing_class.from_json_file(UpperCAmelCase_).to_dict()
a : Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase_)
a : Dict = self.image_processing_class.from_pretrained(UpperCAmelCase_).to_dict()
a : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_)
@unittest.skip('ImageGPT requires clusters at initialization')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : List[str] = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
a : List[Any] = Image.open(dataset[4]['file'] )
a : int = Image.open(dataset[5]['file'] )
a : Any = [imagea, imagea]
return images
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
a : List[str] = prepare_images()
# test non-batched
a : str = image_processing(images[0] , return_tensors='pt')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
a : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_)
# test batched
a : List[Any] = image_processing(UpperCAmelCase_ , return_tensors='pt')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
a : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_)
| 345 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : float | Decimal , snake_case : float = 10**-10 ) -> float:
"""simple docstring"""
a : Dict = a
while True:
a : Any = Decimal(snake_case ) - (
Decimal(eval(snake_case ) ) / Decimal(eval(str(diff(snake_case ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case ) ) < precision: # noqa: S307
return float(snake_case )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find value of e (root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
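# One hedged worked step of the update x_{n+1} = x_n - f(x_n) / f'(x_n):
# for f(x) = x**2 - 5*x + 2 starting at x_0 = 0.4,
#   x_1 = 0.4 - (0.16) / (-4.2) ~ 0.4381,
# already close to the smaller root (5 - sqrt(17)) / 2 ~ 0.4384.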
| 345 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
a__ = None
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
a__ = {
"""google/fnet-base""": 5_12,
"""google/fnet-large""": 5_12,
}
a__ = """▁"""
class snake_case ( __a ):
'''simple docstring'''
snake_case_ : List[str] = VOCAB_FILES_NAMES
snake_case_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : Union[str, Any] = ["""input_ids""", """token_type_ids"""]
snake_case_ : List[str] = FNetTokenizer
def __init__( self : List[str] , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="<unk>" , lowerCAmelCase : Optional[Any]="[SEP]" , lowerCAmelCase : Optional[int]="<pad>" , lowerCAmelCase : Optional[Any]="[CLS]" , lowerCAmelCase : int="[MASK]" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
_snake_case : str = (
AddedToken(a_ , lstrip=a_ , rstrip=a_ , normalized=a_)
if isinstance(a_ , a_)
else mask_token
)
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , **a_ , )
_snake_case : Union[str, Any] = do_lower_case
_snake_case : Dict = remove_space
_snake_case : int = keep_accents
_snake_case : Dict = vocab_file
_snake_case : str = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> Union[str, Any]:
"""simple docstring"""
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Optional[int] = [self.cls_token_id]
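# Single sequence -> ``[CLS] A [SEP]``; pair -> ``[CLS] A [SEP] B [SEP]``.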
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[Any]:
"""simple docstring"""
_snake_case : Any = [self.sep_token_id]
_snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> List[str]:
"""simple docstring"""
if not os.path.isdir(a_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_snake_case : List[Any] = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(a_):
copyfile(self.vocab_file , a_)
return (out_vocab_file,)
| 317 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with a distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "rag"
lowercase__ = True
def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ):
'''simple docstring'''
super().__init__(
bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" )
_snake_case : List[str] = question_encoder_config.pop("""model_type""" )
_snake_case : Union[str, Any] = kwargs.pop("""generator""" )
_snake_case : Any = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
_snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Any = reduce_loss
_snake_case : Optional[int] = label_smoothing
_snake_case : Dict = exclude_bos_score
_snake_case : int = do_marginalize
_snake_case : Optional[Any] = title_sep
_snake_case : Any = doc_sep
_snake_case : List[str] = n_docs
_snake_case : Tuple = max_combined_length
_snake_case : Optional[Any] = dataset
_snake_case : Union[str, Any] = dataset_split
_snake_case : Tuple = index_name
_snake_case : Any = retrieval_vector_size
_snake_case : Union[str, Any] = retrieval_batch_size
_snake_case : str = passages_path
_snake_case : Tuple = index_path
_snake_case : List[Any] = use_dummy_dataset
_snake_case : Optional[Any] = output_retrieved
_snake_case : Tuple = do_deduplication
_snake_case : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ )
@classmethod
def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ )
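# (In the upstream library this classmethod is exposed as
# ``RagConfig.from_question_encoder_generator_configs``.)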
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : List[str] = self.question_encoder.to_dict()
_snake_case : Tuple = self.generator.to_dict()
_snake_case : Dict = self.__class__.model_type
return output
| 64 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self : str , _lowercase : Union[str, Any] , _lowercase : int=13 , _lowercase : Any=30 , _lowercase : str=2 , _lowercase : List[Any]=3 , _lowercase : Union[str, Any]=True , _lowercase : Any=True , _lowercase : Union[str, Any]=32 , _lowercase : Union[str, Any]=2 , _lowercase : Optional[Any]=4 , _lowercase : Optional[Any]=37 , _lowercase : int="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : List[Any]=0.1 , _lowercase : str=10 , _lowercase : Optional[int]=0.02 , _lowercase : Any=3 , _lowercase : Optional[Any]=0.6 , _lowercase : str=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = mask_ratio
__UpperCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__UpperCAmelCase = (image_size // patch_size) ** 2
__UpperCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
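# e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, and
# ceil((1 - 0.6) * 226) = 91 visible tokens per sequence.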
def a ( self : List[str] ):
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a ( self : Union[str, Any] ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def a ( self : Any , _lowercase : Dict , _lowercase : int , _lowercase : List[str] ):
__UpperCAmelCase = TFViTMAEModel(config=_a )
__UpperCAmelCase = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Optional[int] ):
__UpperCAmelCase = TFViTMAEForPreTraining(_a )
__UpperCAmelCase = model(_a , training=_a )
# expected sequence length = num_patches
__UpperCAmelCase = (self.image_size // self.patch_size) ** 2
__UpperCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__UpperCAmelCase = 1
__UpperCAmelCase = TFViTMAEForPreTraining(_a )
__UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase = model(_a , training=_a )
__UpperCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def a ( self : Any ):
__UpperCAmelCase = self.prepare_config_and_inputs()
(__UpperCAmelCase) = config_and_inputs
__UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Union[str, Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ : Dict = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
a__ : Optional[Any] = False
a__ : str = False
a__ : Dict = False
a__ : Tuple = False
def a ( self : str ):
__UpperCAmelCase = TFViTMAEModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def a ( self : List[Any] ):
pass
def a ( self : List[str] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def a ( self : Tuple ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_a )
__UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a ( self : List[str] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def a ( self : int ):
# make the mask reproducible
np.random.seed(2 )
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_a )
__UpperCAmelCase = self._prepare_for_class(_a , _a )
__UpperCAmelCase = model(_a , noise=_a )
__UpperCAmelCase = copy.deepcopy(self._prepare_for_class(_a , _a ) )
__UpperCAmelCase = model(**_a , noise=_a )
__UpperCAmelCase = outputs_dict[0].numpy()
__UpperCAmelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def a ( self : str ):
# make the mask reproducible
np.random.seed(2 )
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowercase : Any ):
__UpperCAmelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_a ):
__UpperCAmelCase = v.numpy()
else:
__UpperCAmelCase = np.array(_a )
return inputs_np_dict
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_a )
__UpperCAmelCase = self._prepare_for_class(_a , _a )
__UpperCAmelCase = prepare_numpy_arrays(_a )
__UpperCAmelCase = model(_a , noise=_a )
__UpperCAmelCase = model(**_a , noise=_a )
self.assert_outputs_same(_a , _a )
def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Dict ):
# make masks reproducible
np.random.seed(2 )
__UpperCAmelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__UpperCAmelCase = tf.constant(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__UpperCAmelCase = tf_noise
super().check_pt_tf_models(_a , _a , _a )
def a ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_a )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(_a , _a ),)
if isinstance(_a , _a )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_a , '''_keras_serializable''' , _a )
}
__UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__UpperCAmelCase = tf.convert_to_tensor(_a )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
__UpperCAmelCase = main_layer_class(_a )
__UpperCAmelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__UpperCAmelCase = tf.keras.Model(_a , outputs=main_layer(_a ) )
__UpperCAmelCase = model(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = os.path.join(_a , '''keras_model.h5''' )
model.save(_a )
__UpperCAmelCase = tf.keras.models.load_model(
_a , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_a , tf.keras.Model )
__UpperCAmelCase = model(_a )
self.assert_outputs_same(_a , _a )
@slow
def a ( self : str ):
# make mask reproducible
np.random.seed(2 )
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_a )
__UpperCAmelCase = self._prepare_for_class(_a , _a )
__UpperCAmelCase = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
__UpperCAmelCase = outputs.last_hidden_state.numpy()
__UpperCAmelCase = 0
else:
__UpperCAmelCase = outputs.logits.numpy()
__UpperCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a , saved_model=_a )
__UpperCAmelCase = model_class.from_pretrained(_a )
__UpperCAmelCase = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
__UpperCAmelCase = after_outputs["last_hidden_state"].numpy()
__UpperCAmelCase = 0
else:
__UpperCAmelCase = after_outputs["logits"].numpy()
__UpperCAmelCase = 0
__UpperCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1E-5 )
def a ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_a )
__UpperCAmelCase = self._prepare_for_class(_a , _a )
__UpperCAmelCase = model(_a , noise=_a )
__UpperCAmelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_a )
__UpperCAmelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__UpperCAmelCase = model_class.from_config(model.config )
__UpperCAmelCase = new_model(_a ) # Build model
new_model.set_weights(model.get_weights() )
__UpperCAmelCase = new_model(_a , noise=_a )
self.assert_outputs_same(_a , _a )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def a ( self : Tuple ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def a ( self : Union[str, Any] ):
pass
@slow
def a ( self : Tuple ):
__UpperCAmelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_a )
def lowercase__ ( ):
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def a ( self : int ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def a ( self : Optional[int] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__UpperCAmelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=_a , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__UpperCAmelCase = ViTMAEConfig()
__UpperCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__UpperCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
__UpperCAmelCase = model(**_a , noise=_a )
# verify the logits
__UpperCAmelCase = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , _a )
__UpperCAmelCase = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _a , atol=1E-4 )
| 369 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_lowercase : int = pd.read_csv('sample_data.csv', header=None)
_lowercase : str = df.shape[:1][0]
# If you're using some other dataset, set the target column here
_lowercase : Optional[int] = df.iloc[:, 1:2]
_lowercase : Optional[int] = actual_data.values.reshape(len_data, 1)
_lowercase : Any = MinMaxScaler().fit_transform(actual_data)
_lowercase : Dict = 10
_lowercase : List[str] = 5
_lowercase : Any = 20
_lowercase : Optional[int] = len_data - periods * look_back
_lowercase : Optional[int] = actual_data[:division]
_lowercase : Optional[int] = actual_data[division - look_back :]
_lowercase ,_lowercase : Tuple = [], []
_lowercase ,_lowercase : Optional[Any] = [], []
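# Sliding-window construction: with look_back=10 and forward_days=5, each
# sample maps a 10-step window of past values to the following 5 values.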
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_lowercase : List[str] = np.array(train_x)
_lowercase : str = np.array(test_x)
_lowercase : Union[str, Any] = np.array([list(i.ravel()) for i in train_y])
_lowercase : List[Any] = np.array([list(i.ravel()) for i in test_y])
_lowercase : str = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
_lowercase : str = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
_lowercase : str = model.predict(x_test)
| 86 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__a : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ = "auto" ) -> int:
'''simple docstring'''
if slice_size == "auto":
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__=1_60_00 , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors='''pt''' , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
__lowercase = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
__lowercase = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = len(__UpperCamelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__UpperCamelCase )}." )
# get prompt text embeddings
__lowercase = self.tokenizer(
__UpperCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowercase = text_embeddings.shape
__lowercase = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__lowercase = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = [''] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
F" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
__lowercase = negative_prompt
__lowercase = text_input_ids.shape[-1]
__lowercase = self.tokenizer(
__UpperCamelCase , padding='''max_length''' , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='''pt''' , )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = uncond_embeddings.shape[1]
__lowercase = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
__lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowercase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device='''cpu''' , dtype=__UpperCamelCase ).to(
self.device )
else:
__lowercase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
__lowercase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__lowercase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__lowercase = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__lowercase = 1 / 0.1_8215 * latents
__lowercase = self.vae.decode(__UpperCamelCase ).sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
| 210 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 213 | 0 |
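The shim above follows a standard deprecation pattern: subclass the replacement, warn on construction, delegate everything else. A self-contained sketch with generic placeholder names (not transformers classes):

import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldProcessor(NewProcessor):  # kept only for backward compatibility
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'OldProcessor is deprecated; use NewProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)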
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_tokens)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 355 |
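To make the '##' marking concrete: every trailing character of a matched multi-character Chinese word is rewritten in WordPiece sub-token style. Illustrative usage of the functions above:

tokens = ['身', '高', '180']
print(add_sub_symbol(tokens, {'身高'}))
# -> ['身', '##高', '180']: the tail of the matched word 身高 is marked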
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_A = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=None ):
require_version(deps[pkg] , SCREAMING_SNAKE_CASE__ )
| 117 | 0 |
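A minimal sketch of what such a runtime version gate does; this is an illustrative reimplementation, not the transformers.utils.versions helper itself:

from importlib.metadata import version
from packaging.version import Version

def check_min_version(pkg: str, minimum: str) -> None:
    # Compare the installed distribution version against a required minimum.
    installed = Version(version(pkg))
    if installed < Version(minimum):
        raise ImportError(f'{pkg}>={minimum} is required, found {installed}')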
def hamming_distance(string_a: str, string_b: str) -> int:
    '''simple docstring'''
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
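Quick usage check for hamming_distance above:

assert hamming_distance('karolin', 'kathrin') == 3
assert hamming_distance('00000', '11111') == 5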
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 154 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
A__ : Any =TypeVar('''T''')
class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 220 |
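Usage sketch for the linked-list stack above (names follow the restored class):

stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == '3->2->1'
assert stack.pop() == 3
assert stack.peek() == 2
assert len(stack) == 2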
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def lowercase__ ( *__snake_case : Optional[Any] , **__snake_case : Any ) -> Tuple:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
_lowercase: Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowercase__ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowercase__ ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = vqa_pipeline(__snake_case , top_k=1 )
self.assertEqual(
__snake_case , [
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
] , )
@require_torch
def lowercase__ ( self : str ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
@slow
@require_torch
def lowercase__ ( self : List[Any] ) -> List[str]:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
pass
| 220 | 1 |
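Outside the test harness, the same pipeline is driven like this; a hedged sketch (model id and fixture path copied from the tests, scores are approximate):

from transformers import pipeline

vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
preds = vqa(
    image='./tests/fixtures/tests_samples/COCO/000000039769.png',
    question='How many cats are there?',
    top_k=2,
)
# preds ~ [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]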
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Any = '''Hello, World!'''
lowercase__ : int = '''en_XX'''
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = Path('''data_bin''' )
lowerCAmelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(snake_case__ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(snake_case__ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(snake_case__ )
lowerCAmelCase = xmod.model.encoder.sentence_encoder
lowerCAmelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , snake_case__ )
lowerCAmelCase = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase = xmod_sent_encoder.embed_tokens.weight
lowerCAmelCase = xmod_sent_encoder.embed_positions.weight
lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.weight
lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase = model.roberta.encoder.layer[i]
lowerCAmelCase = xmod_sent_encoder.layers[i]
# self attention
lowerCAmelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCAmelCase = xmod_layer.self_attn.q_proj.weight
lowerCAmelCase = xmod_layer.self_attn.q_proj.bias
lowerCAmelCase = xmod_layer.self_attn.k_proj.weight
lowerCAmelCase = xmod_layer.self_attn.k_proj.bias
lowerCAmelCase = xmod_layer.self_attn.v_proj.weight
lowerCAmelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCAmelCase = xmod_layer.self_attn.out_proj.weight
lowerCAmelCase = xmod_layer.self_attn.out_proj.bias
lowerCAmelCase = xmod_layer.self_attn_layer_norm.weight
lowerCAmelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCAmelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCAmelCase = xmod_layer.fca.weight
lowerCAmelCase = xmod_layer.fca.bias
# output
lowerCAmelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCAmelCase = xmod_layer.fca.weight
lowerCAmelCase = xmod_layer.fca.bias
lowerCAmelCase = xmod_layer.final_layer_norm.weight
lowerCAmelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCAmelCase = xmod_layer.adapter_layer_norm.weight
lowerCAmelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCAmelCase = bert_output.adapter_modules[lang_code]
lowerCAmelCase = xmod_layer.adapter_modules[lang_code]
lowerCAmelCase = from_adapter.fca.weight
lowerCAmelCase = from_adapter.fca.bias
lowerCAmelCase = from_adapter.fca.weight
lowerCAmelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCAmelCase = xmod_sent_encoder.layer_norm.weight
lowerCAmelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCAmelCase = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCAmelCase = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCAmelCase = xmod.model.encoder.lm_head.dense.weight
lowerCAmelCase = xmod.model.encoder.lm_head.dense.bias
lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase = xmod.model.encoder.lm_head.weight
lowerCAmelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
lowerCAmelCase = model(snake_case__ )[0]
if classification_head:
lowerCAmelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(snake_case__ ) )
else:
lowerCAmelCase = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
lowerCAmelCase = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
lowercase__ : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 338 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.')
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    """simple docstring"""

    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
| 338 | 1 |
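For reference, the identity the first test verifies is the standard block-determinant factorisation (a background fact, not stated in the source):

\det\begin{pmatrix} A & B \\ B^{\mathsf T} & C \end{pmatrix} = \det(A)\,\det\left(C - B^{\mathsf T} A^{-1} B\right)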
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
_A : Tuple = {}
if train_file is not None:
_A : Optional[int] = [train_file]
if eval_file is not None:
_A : Optional[int] = [eval_file]
if test_file is not None:
_A : Dict = [test_file]
_A : List[str] = datasets.load_dataset('csv' ,data_files=lowerCamelCase )
_A : Union[str, Any] = list(ds[list(files.keys() )[0]].features.keys() )
_A : Any = features_name.pop(lowerCamelCase )
_A : Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) )
_A : Tuple = {label: i for i, label in enumerate(lowerCamelCase )}
_A : str = tokenizer.model_input_names
_A : Tuple = {}
if len(lowerCamelCase ) == 1:
for k in files.keys():
_A : Optional[Any] = ds[k].map(
lambda lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] ,truncation=lowerCamelCase ,max_length=lowerCamelCase ,padding='max_length' ) ,batched=lowerCamelCase ,)
elif len(lowerCamelCase ) == 2:
for k in files.keys():
_A : Optional[int] = ds[k].map(
lambda lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) ,truncation=lowerCamelCase ,max_length=lowerCamelCase ,padding='max_length' ,) ,batched=lowerCamelCase ,)
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A : List[str] = {k: v for k, v in ex.items() if k in input_names}
_A : Tuple = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A : Any = {k: v for k, v in ex.items() if k in input_names}
_A : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
_A : Any = labelaid[ex[label_name]]
yield (d, label)
_A : Optional[Any] = (
tf.data.Dataset.from_generator(
lowerCamelCase ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_A : Union[str, Any] = (
tf.data.Dataset.from_generator(
lowerCamelCase ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_A : Dict = (
tf.data.Dataset.from_generator(
lowerCamelCase ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A : int = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
A : str = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO ,)
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_A : Dict = get_tfds(
train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=lowerCamelCase ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,)
_A : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(lowerCamelCase ) ,labelaid=lowerCamelCase ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task='text-classification' ,cache_dir=model_args.cache_dir ,)
with training_args.strategy.scope():
_A : Any = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_pt=bool('.bin' in model_args.model_name_or_path ) ,config=lowerCamelCase ,cache_dir=model_args.cache_dir ,)
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A : str = TFTrainer(
model=lowerCamelCase ,args=lowerCamelCase ,train_dataset=lowerCamelCase ,eval_dataset=lowerCamelCase ,compute_metrics=lowerCamelCase ,)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_A : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_A : Optional[Any] = trainer.evaluate()
_A : List[str] = os.path.join(training_args.output_dir ,'eval_results.txt' )
with open(lowerCamelCase ,'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 362 |
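The generator-to-tf.data bridge used above, reduced to its essentials; feature names here are placeholders, not the script's:

import tensorflow as tf

examples = [({'input_ids': [101, 2023, 102]}, 0), ({'input_ids': [101, 2004, 102]}, 1)]

def gen():
    # Yield (features, label) pairs, mirroring gen_train/gen_val above.
    for features, label in examples:
        yield features, label

ds = tf.data.Dataset.from_generator(
    gen,
    output_signature=(
        {'input_ids': tf.TensorSpec(shape=(None,), dtype=tf.int64)},
        tf.TensorSpec(shape=(), dtype=tf.int64),
    ),
)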
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227 | 0 |
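The _LazyModule machinery above defers heavy imports until an attribute is first touched. A minimal sketch of the same idea using PEP 562 module-level __getattr__ (illustrative only, and it must live in a package __init__.py; transformers' real class is richer):

import importlib

_import_structure = {'math_utils': ['sqrt']}  # submodule -> exported names

def __getattr__(name):
    # Import the owning submodule lazily on first attribute access.
    for submodule, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f'.{submodule}', __name__), name)
    raise AttributeError(name)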
"""simple docstring"""
import datasets
a :Dict = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
a :List[str] = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
a :Optional[Any] = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __a (datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions, references)}
| 132 |
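simple_accuracy at work on NumPy arrays, as the metric above uses it:

import numpy as np

assert simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) == 2 / 3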
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
| 132 | 1 |
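The hand-rolled cache dict above can be replaced by functools.lru_cache with identical results; a minimal equivalent sketch:

from functools import lru_cache

@lru_cache(maxsize=None)
def calculate(days: int, absent: int, late: int) -> int:
    # Same recursion as _calculate above, memoised by the decorator.
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        calculate(days - 1, absent, late + 1)  # late today
        + calculate(days - 1, absent + 1, 0)   # absent today
        + calculate(days - 1, absent, 0)       # on time today
    )

assert calculate(4, 0, 0) == 43  # the classic 4-day count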
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 346 |
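Quick check of the sieve above:

assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]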
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int, ...]
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
| 346 | 1 |
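The pixel-index unravel inside get_image_coords, shown in plain NumPy (illustrative; the class above does the same with torch):

import numpy as np

width, height = 4, 3
idx = np.arange(height * width)
coords = np.stack([idx % width, idx // width], axis=1)  # (x, y) per pixel
assert coords[5].tolist() == [1, 1]  # pixel 5 sits at column 1, row 1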
def wave(txt: str) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('doctest').testmod()
| 32 |
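With the restored name, the function yields one variant per alphabetic position:

assert wave('ab1c') == ['Ab1c', 'aB1c', 'ab1C']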
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_beit"""] = ["""BeitFeatureExtractor"""]
    _import_structure["""image_processing_beit"""] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 185 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=36 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : str=16 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=6 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Tuple=1_000 , ) ->Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = patch_size
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = coordinate_size
A__ = shape_size
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A__ = text_seq_length
A__ = (image_size // patch_size) ** 2 + 1
A__ = self.text_seq_length + self.image_seq_length
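# Worked example of the arithmetic above, assuming the defaults map as in the
# upstream tester (image_size=4, patch_size=2, text_seq_length=7): the image
# contributes (4 // 2) ** 2 + 1 = 5 tokens, so seq_length = 7 + 5 = 12.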
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
A__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
A__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
A__ = bbox[i, j, 3]
A__ = bbox[i, j, 1]
A__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
A__ = bbox[i, j, 2]
A__ = bbox[i, j, 0]
A__ = tmp_coordinate
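# After the two swaps above every box satisfies x0 <= x1 and y0 <= y1;
# e.g. a sampled (x0, y0, x1, y1) of [7, 9, 2, 4] becomes [2, 4, 7, 9].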
A__ = tf.constant(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.text_seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
A__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple) ->Any:
'''simple docstring'''
A__ = TFLayoutLMvaModel(config=UpperCAmelCase__)
# text + image
A__ = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , training=UpperCAmelCase__)
A__ = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , training=UpperCAmelCase__ , )
A__ = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
A__ = model({'''pixel_values''': pixel_values} , training=UpperCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]) ->str:
'''simple docstring'''
A__ = self.num_labels
A__ = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase__)
A__ = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple) ->Tuple:
'''simple docstring'''
A__ = self.num_labels
A__ = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase__)
A__ = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]) ->List[Any]:
'''simple docstring'''
A__ = 2
A__ = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase__)
A__ = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , training=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = config_and_inputs
A__ = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]) ->Any:
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=False) ->dict:
'''simple docstring'''
A__ = copy.deepcopy(UpperCAmelCase__)
if model_class in get_values(UpperCAmelCase__):
A__ = {
k: tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(UpperCAmelCase__ , tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
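# The tiling above turns e.g. input_ids of shape (batch_size, seq_length)
# into (batch_size, num_choices, seq_length), duplicating each example once
# per choice as multiple-choice heads expect.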
if return_labels:
if model_class in get_values(UpperCAmelCase__):
A__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(UpperCAmelCase__):
A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(UpperCAmelCase__):
A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(UpperCAmelCase__):
A__ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa)
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : int) ->int:
'''simple docstring'''
A__ = TFLayoutLMvaModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
if getattr(UpperCAmelCase__ , '''hf_compute_loss''' , UpperCAmelCase__):
# The number of elements in the loss should be the same as the number of elements in the label
A__ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
A__ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase__)[0]
]
A__ = added_label.shape.as_list()[:1]
# Test that the model correctly computes the loss with kwargs
A__ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
A__ = prepared_for_class.pop('''input_ids''')
A__ = model(UpperCAmelCase__ , **UpperCAmelCase__)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that the model correctly computes the loss when we mask some positions
A__ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
A__ = prepared_for_class.pop('''input_ids''')
if "labels" in prepared_for_class:
A__ = prepared_for_class['''labels'''].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
A__ = -100
A__ = tf.convert_to_tensor(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , **UpperCAmelCase__)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
# Test that the model correctly computes the loss with a dict
A__ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
A__ = model(UpperCAmelCase__)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that the model correctly computes the loss with a tuple
A__ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
# Get keys that were added with the _prepare_for_class function
A__ = prepared_for_class.keys() - inputs_dict.keys()
A__ = inspect.signature(model.call).parameters
A__ = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
A__ = {0: '''input_ids'''}
for label_key in label_keys:
A__ = signature_names.index(UpperCAmelCase__)
A__ = label_key
A__ = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
A__ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
A__ = prepared_for_class[value]
A__ = tuple(UpperCAmelCase__)
# Send to model
A__ = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]:
'''simple docstring'''
((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
'''simple docstring'''
((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
A__ = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''').pixel_values
A__ = tf.constant([[1, 2]])
A__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0)
# forward pass
A__ = model(input_ids=UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the last hidden states
A__ = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__)
A__ = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1e-4))
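# The integration test above uses the usual expected-slice idiom: assert the
# full output shape, then compare a small corner of the tensor to hard-coded
# reference values within a tolerance. A standalone sketch of that idiom with
# synthetic numbers (not real model outputs):
import numpy as np

hidden = np.zeros((1, 199, 768), dtype=np.float32)
hidden[0, :3, :3] = [[-0.0529, 0.3618, 0.1632],
                     [-0.1587, -0.1667, -0.0400],
                     [-0.1557, -0.1671, -0.0505]]
expected_slice = np.array([[-0.0529, 0.3618, 0.1632],
                           [-0.1587, -0.1667, -0.0400],
                           [-0.1557, -0.1671, -0.0505]])
assert hidden.shape == (1, 199, 768)
assert np.allclose(hidden[0, :3, :3], expected_slice, atol=1e-4)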
| 353 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : float) ->float:
'''simple docstring'''
return 0.0
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[int | float, int | float]:
"""simple docstring"""
A__ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
A__ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> None:
"""simple docstring"""
A__ = 512
A__ = [1] + [0] * (size - 1)
A__ = [filter_type.process(lowercase_ ) for item in inputs]
A__ = [0] * (samplerate - size) # zero-padding
outputs += filler
A__ = np.abs(np.fft.fft(lowercase_ ) )
A__ = 20 * np.logaa(lowercase_ )
# Frequencies on a log scale from 24 Hz up to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
A__ = get_bounds(lowercase_ , lowercase_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(lowercase_ )
plt.show()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> None:
"""simple docstring"""
A__ = 512
A__ = [1] + [0] * (size - 1)
A__ = [filter_type.process(lowercase_ ) for item in inputs]
A__ = [0] * (samplerate - size) # zero-padding
outputs += filler
A__ = np.angle(np.fft.fft(lowercase_ ) )
# Frequencies on a log scale from 24 Hz up to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(lowercase_ , -2 * pi ) )
plt.show()
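# Illustrative only: a minimal one-pole low-pass filter exposing the
# process(sample) interface that the two plotting helpers above expect.
# Feeding the unit impulse [1, 0, 0, ...] through process() yields the
# impulse response, whose FFT magnitude (in dB) is what the
# frequency-response plot draws.
class OnePoleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = alpha * x[n] + (1 - alpha) * y[n - 1]
        self.prev = self.alpha * sample + (1.0 - self.alpha) * self.prev
        return self.prev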
| 231 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "xlm-roberta-xl"
def __init__( self : int , _lowerCAmelCase : Any=250_880 , _lowerCAmelCase : int=2_560 , _lowerCAmelCase : List[Any]=36 , _lowerCAmelCase : Union[str, Any]=32 , _lowerCAmelCase : Optional[int]=10_240 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Any=514 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : List[str]=1E-05 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Union[str, Any]="absolute" , _lowerCAmelCase : Any=True , _lowerCAmelCase : int=None , **_lowerCAmelCase : int , ):
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = classifier_dropout
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : str ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 225 |
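# The ONNX inputs property above selects dynamic axes per task. A standalone
# reproduction of what it evaluates to (hypothetical helper, outside the
# class):
from collections import OrderedDict

def onnx_dynamic_inputs(task: str) -> OrderedDict:
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(onnx_dynamic_inputs("default")["input_ids"])  # {0: 'batch', 1: 'sequence'}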
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : str = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure) | 225 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
__lowerCAmelCase = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
__lowerCAmelCase = {
'''jukebox''': 5_12,
}
class __a ( __UpperCamelCase ):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_LYRIC_TOKENS_SIZES
__lowercase : Dict = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=["v3", "v2", "v2"] , lowerCAmelCase__=512 , lowerCAmelCase__=5 , lowerCAmelCase__="<|endoftext|>" , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
super().__init__(
unk_token=lowerCAmelCase__ , n_genres=lowerCAmelCase__ , version=lowerCAmelCase__ , max_n_lyric_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: List[str] = version
lowercase__: Any = max_n_lyric_tokens
lowercase__: Optional[Any] = n_genres
with open(lowerCAmelCase__ , encoding='utf-8' ) as vocab_handle:
lowercase__: str = json.load(lowerCAmelCase__ )
with open(lowerCAmelCase__ , encoding='utf-8' ) as vocab_handle:
lowercase__: int = json.load(lowerCAmelCase__ )
with open(lowerCAmelCase__ , encoding='utf-8' ) as vocab_handle:
lowercase__: List[Any] = json.load(lowerCAmelCase__ )
lowercase__: List[str] = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2 the character vocabulary has n_vocab=80; in v3 the '+' character is missing, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
lowercase__: Tuple = oov.replace(R'\-\'' , R'\-+\'' )
lowercase__: Optional[int] = regex.compile(lowerCAmelCase__ )
lowercase__: int = {v: k for k, v in self.artists_encoder.items()}
lowercase__: Any = {v: k for k, v in self.genres_encoder.items()}
lowercase__: Union[str, Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: int = [self.artists_encoder.get(lowerCAmelCase__ , 0 ) for artist in list_artists]
for genres in range(len(lowerCAmelCase__ ) ):
lowercase__: str = [self.genres_encoder.get(lowerCAmelCase__ , 0 ) for genre in list_genres[genres]]
lowercase__: Tuple = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
lowercase__: Tuple = [[self.lyrics_encoder.get(lowerCAmelCase__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__: List[Any] = self.prepare_for_tokenization(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Optional[Any] = self._tokenize(lowerCAmelCase__ )
return artist, genre, lyrics
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Tuple[str, str, str, Dict[str, Any]]:
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
lowercase__: Optional[Any] = artists[idx].lower()
lowercase__: List[Any] = [genres[idx].lower()]
else:
lowercase__: int = self._normalize(artists[idx] ) + '.v2'
lowercase__: Tuple = [
self._normalize(lowerCAmelCase__ ) + '.v2' for genre in genres[idx].split('_' )
] # splitting on '_' handles the combined genres of the full dictionary
if self.version[0] == "v2":
lowercase__: int = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
lowercase__: str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
lowercase__: Dict = {vocab[index]: index + 1 for index in range(len(lowerCAmelCase__ ) )}
lowercase__: int = 0
lowercase__: Optional[Any] = len(lowerCAmelCase__ ) + 1
lowercase__: List[Any] = self.vocab
lowercase__: Optional[int] = {v: k for k, v in self.vocab.items()}
lowercase__: Tuple = ''
else:
lowercase__: Optional[int] = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
lowercase__: str = self._run_strip_accents(lowerCAmelCase__ )
lowercase__: Union[str, Any] = lyrics.replace('\\' , '\n' )
lowercase__: Optional[Any] = self.out_of_vocab.sub('' , lowerCAmelCase__ ), [], []
return artists, genres, lyrics
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
lowercase__: List[str] = unicodedata.normalize('NFD' , lowerCAmelCase__ )
lowercase__: Union[str, Any] = []
for char in text:
lowercase__: Optional[Any] = unicodedata.category(lowerCAmelCase__ )
if cat == "Mn":
continue
output.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
lowercase__: Optional[int] = (
[chr(lowerCAmelCase__ ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(lowerCAmelCase__ ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(lowerCAmelCase__ ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
lowercase__: Union[str, Any] = frozenset(lowerCAmelCase__ )
lowercase__: int = re.compile(R'_+' )
lowercase__: Any = ''.join([c if c in accepted else '_' for c in text.lower()] )
lowercase__: Optional[int] = pattern.sub('_' , lowerCAmelCase__ ).strip('_' )
return text
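# e.g. "Daft Punk!!" -> lowercased "daft punk!!" -> "daft_punk__" (the space
# and '!' are mapped to '_') -> runs of '_' collapsed and stripped -> "daft_punk".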
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
return " ".join(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> Tuple:
'''simple docstring'''
# Convert to TensorType
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: Dict = TensorType(lowerCAmelCase__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
lowercase__: str = tf.constant
lowercase__: List[str] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
lowercase__: Tuple = torch.tensor
lowercase__: int = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
lowercase__: Any = jnp.array
lowercase__: Tuple = _is_jax
else:
lowercase__: Optional[Any] = np.asarray
lowercase__: List[Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowercase__: Optional[Any] = [inputs]
if not is_tensor(lowerCAmelCase__ ):
lowercase__: Dict = as_tensor(lowerCAmelCase__ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="pt" ) -> BatchEncoding:
'''simple docstring'''
lowercase__: List[str] = [0, 0, 0]
lowercase__: List[Any] = [artist] * len(self.version )
lowercase__: str = [genres] * len(self.version )
lowercase__ , lowercase__ , lowercase__: Union[str, Any] = self.tokenize(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__ , lowercase__ , lowercase__: Any = self._convert_token_to_id(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: str = [-INFINITY] * len(full_tokens[-1] )
lowercase__: int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowerCAmelCase__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=lowerCAmelCase__ ) )
lowercase__: List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=lowerCAmelCase__ ) )
lowercase__: Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowerCAmelCase__ ) )
return (artists_file, genres_file, lyrics_file)
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: List[Any] = self.artists_decoder.get(lowerCAmelCase__ )
lowercase__: Dict = [self.genres_decoder.get(lowerCAmelCase__ ) for genre in genres_index]
lowercase__: Optional[int] = [self.lyrics_decoder.get(lowerCAmelCase__ ) for character in lyric_index]
return artist, genres, lyrics
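# convert_to_tensors above dispatches on the requested framework (TensorFlow,
# PyTorch, JAX, or the NumPy fallback). A minimal reproduction of just the
# NumPy branch (hypothetical helper name):
import numpy as np

def to_numpy_batch(inputs, prepend_batch_axis: bool = False) -> np.ndarray:
    if prepend_batch_axis:
        inputs = [inputs]  # add a leading batch dimension
    return np.asarray(inputs)

print(to_numpy_batch([1, 2, 3], prepend_batch_axis=True).shape)  # (1, 3)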
| 288 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __a :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , lowerCAmelCase__=2 , ) -> List[str]:
'''simple docstring'''
lowercase__: List[str] = parent
lowercase__: Tuple = batch_size
lowercase__: Optional[Any] = image_size
lowercase__: int = patch_size
lowercase__: Union[str, Any] = num_channels
lowercase__: Dict = is_training
lowercase__: Any = use_labels
lowercase__: Optional[int] = hidden_size
lowercase__: Tuple = num_hidden_layers
lowercase__: List[str] = num_attention_heads
lowercase__: Any = intermediate_size
lowercase__: Union[str, Any] = hidden_act
lowercase__: Union[str, Any] = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: str = type_sequence_label_size
lowercase__: List[str] = initializer_range
lowercase__: Optional[Any] = scope
lowercase__: Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase__: List[str] = (image_size // patch_size) ** 2
lowercase__: Tuple = num_patches + 2
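# With the defaults assumed here (image_size=30, patch_size=2) this gives
# (30 // 2) ** 2 = 225 patches and a sequence length of 225 + 2 = 227.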
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: int = None
if self.use_labels:
lowercase__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: List[str] = TFDeiTModel(config=lowerCAmelCase__ )
lowercase__: Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Any = TFDeiTForMaskedImageModeling(config=lowerCAmelCase__ )
lowercase__: int = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__: Tuple = 1
lowercase__: Optional[int] = TFDeiTForMaskedImageModeling(lowerCAmelCase__ )
lowercase__: int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__: str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__: Any = self.type_sequence_label_size
lowercase__: Optional[Any] = TFDeiTForImageClassification(lowerCAmelCase__ )
lowercase__: List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__: Optional[Any] = 1
lowercase__: Union[str, Any] = TFDeiTForImageClassification(lowerCAmelCase__ )
lowercase__: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__: str = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__: Dict = config_and_inputs
lowercase__: Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __a ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
__lowercase : Union[str, Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowercase : Union[str, Any] = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Tuple = False
__lowercase : int = False
__lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: List[Any] = TFDeiTModelTester(self )
lowercase__: int = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ , lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Tuple = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Dense ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ , lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Any = model_class(lowerCAmelCase__ )
lowercase__: Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: Dict = [*signature.parameters.keys()]
lowercase__: Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Union[str, Any] = TFDeiTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def snake_case_ ( ) -> Tuple:
lowercase__: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowercase__: str = self.default_image_processor
lowercase__: Dict = prepare_img()
lowercase__: Optional[int] = image_processor(images=lowerCAmelCase__ , return_tensors='tf' )
# forward pass
lowercase__: int = model(**lowerCAmelCase__ )
# verify the logits
lowercase__: Tuple = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
lowercase__: List[Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 288 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[Any]=7 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=18 ,lowerCamelCase_: Tuple=30 ,lowerCamelCase_: Any=400 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: int=True ,) -> Union[str, Any]:
UpperCAmelCase_ : Any = size if size is not None else {"""height""": 18, """width""": 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Optional[Any] = min_resolution
UpperCAmelCase_ : str = max_resolution
UpperCAmelCase_ : List[str] = do_resize
UpperCAmelCase_ : Any = size
UpperCAmelCase_ : Dict = do_normalize
def A__ ( self: str ) -> Any:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[int] = ImageGPTImageProcessingTester(self )
@property
def A__ ( self: str ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ ,"""clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""do_normalize""" ) )
def A__ ( self: Dict ) -> Any:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def A__ ( self: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : Any = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCamelCase_ ,"""image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase_ )
UpperCAmelCase_ : Any = self.image_processing_class.from_json_file(lowerCamelCase_ ).to_dict()
UpperCAmelCase_ : Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : int = self.image_processing_class.from_pretrained(lowerCamelCase_ ).to_dict()
UpperCAmelCase_ : int = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,lowerCamelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def A__ ( self: Dict ) -> Optional[int]:
pass
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
UpperCAmelCase_ : Union[str, Any] = Image.open(dataset[4]["""file"""] )
UpperCAmelCase_ : Dict = Image.open(dataset[5]["""file"""] )
UpperCAmelCase_ : Optional[int] = [imagea, imagea]
return images
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : int = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
UpperCAmelCase_ : Optional[int] = prepare_images()
# test non-batched
UpperCAmelCase_ : Union[str, Any] = image_processing(images[0] ,return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
UpperCAmelCase_ : Optional[Any] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,lowerCamelCase_ )
# test batched
UpperCAmelCase_ : Any = image_processing(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
UpperCAmelCase_ : List[str] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,lowerCamelCase_ )
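# Sketch of the color-clustering idea behind ImageGPT inputs (toy centroids,
# not the real 512-color palette): each pixel is replaced by the index of its
# nearest cluster centroid, turning an image into a sequence of token ids.
import numpy as np

clusters = np.array([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.5]])  # 2 toy centroids
pixels = np.array([[0.8, 0.6, 0.5], [-0.5, 0.0, 0.4]])      # 2 toy RGB pixels
dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = dists.argmin(axis=1)
print(input_ids)  # [0 1]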
| 345 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request, to emulate the server being down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request, to emulate the server being down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024; tiny GPT-2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
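
# For reference, a minimal sketch of the structure these tests exercise. It is
# illustrative only -- the class under test is transformers.Trie, whose scan is
# more elaborate -- but it reproduces the behavior asserted above: each character
# maps to a nested dict, "" == 1 marks a complete token, and split() matches the
# longest added token at the earliest position.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # sentinel marking the end of a complete token

    def split(self, text):
        out, start, i = [], 0, 0
        while i < len(text):
            # walk the trie from position i, remembering the longest full match
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j
            if end is None:
                i += 1  # no token starts here
            else:
                if start < i:
                    out.append(text[start:i])  # flush the unmatched prefix
                out.append(text[i:end])
                start = i = end
        if start < len(text):
            out.append(text[start:])
        return out or [text]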
| 345 | 1 |
'''simple docstring'''

import torch

from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_supports, W_query):
        # Score each query token as a potential entity start/end, using the
        # support examples' entity-marker token embeddings as references.
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
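
# Minimal usage sketch, assuming the [E] / [/E] entity markers that the
# sayef/fsner checkpoint was trained with; the query/support strings below
# are illustrative, not part of this module:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
    model = FSNERModel()

    queries = ["contact the local health department for an appointment"]
    supports = ["please call the [E] fairfax county health department [/E] first"]

    W_query = tokenizer(queries, padding=True, return_tensors="pt")
    W_supports = tokenizer(supports, padding=True, return_tensors="pt")
    W_supports["sizes"] = torch.tensor([len(supports)])  # supports per query
    W_supports["start_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[E]"))
    W_supports["end_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[/E]"))

    p_starts, p_ends = model(W_supports, W_query)  # per-token start / end scores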
| 170 |
'''simple docstring'''

from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
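
# Minimal usage sketch, kept as a comment since this module uses relative
# imports; the checkpoint name is illustrative -- any unconditional UNet2DModel
# checkpoint should work:
#
#     from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")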
| 170 | 1 |