code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCAmelCase : Tuple = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCAmelCase : Dict = "cuda" if torch.cuda.is_available() else "cpu"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=1_00 , __lowerCAmelCase=" " ) -> List[str]:
'''simple docstring'''
lowercase_ = text.split(UpperCamelCase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ )]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> dict:
'''simple docstring'''
lowercase_ , lowercase_ = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(UpperCamelCase__ ):
titles.append(title if title is not None else """""" )
texts.append(UpperCamelCase__ )
return {"title": titles, "text": texts}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> dict:
'''simple docstring'''
lowercase_ = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
lowercase_ = ctx_encoder(input_ids.to(device=UpperCamelCase__ ) , return_dict=UpperCamelCase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase_ = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase_ = dataset.map(UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCamelCase__ )
lowercase_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase_ = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
lowercase_ = dataset.map(
partial(UpperCamelCase__ , ctx_encoder=UpperCamelCase__ , ctx_tokenizer=UpperCamelCase__ ) , batched=UpperCamelCase__ , batch_size=processing_args.batch_size , features=UpperCamelCase__ , )
# And finally save your dataset
lowercase_ = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(UpperCamelCase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase_ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=UpperCamelCase__ )
# And save the index
lowercase_ = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(UpperCamelCase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns \'title\' and \'text\'"} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'."} , )
lowercase__ = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\'"} , )
lowercase__ = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or"
" \'facebook/dpr-ctx_encoder-multiset-base\'"
)
} , )
lowercase__ = field(
default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
default=__UpperCAmelCase , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowercase__ = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowercase__ = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCAmelCase : Dict = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase : List[Any] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 136
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowercase_ ( self ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase = torch.arange(self.height * self.width )
__lowerCamelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase__ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , *__lowerCamelCase = self.shape
__lowerCamelCase = int(np.prod(lowerCamelCase__ ) )
__lowerCamelCase = self.get_image_coords()
__lowerCamelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCamelCase = self.get_camera_rays(lowerCamelCase__ )
__lowerCamelCase = rays.view(lowerCamelCase__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowercase_ ( self , lowerCamelCase__ ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase , *__lowerCamelCase , __lowerCamelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCamelCase = coords.view(lowerCamelCase__ , -1 , 2 )
__lowerCamelCase = self.resolution()
__lowerCamelCase = self.fov()
__lowerCamelCase = (flat.float() / (res - 1)) * 2 - 1
__lowerCamelCase = fracs * torch.tan(fov / 2 )
__lowerCamelCase = fracs.view(lowerCamelCase__ , -1 , 2 )
__lowerCamelCase = (
self.z.view(lowerCamelCase__ , 1 , 3 )
+ self.x.view(lowerCamelCase__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase__ , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCamelCase = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase__ )
__lowerCamelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase__ , *lowerCamelCase__ , 2 , 3 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase__ , height=lowerCamelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__lowerCamelCase = np.array([np.sin(UpperCamelCase__ ), np.cos(UpperCamelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCamelCase = -z * 4
__lowerCamelCase = np.array([np.cos(UpperCamelCase__ ), -np.sin(UpperCamelCase__ ), 0.0] )
__lowerCamelCase = np.cross(UpperCamelCase__ , UpperCamelCase__ )
origins.append(UpperCamelCase__ )
xs.append(UpperCamelCase__ )
ys.append(UpperCamelCase__ )
zs.append(UpperCamelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , width=UpperCamelCase__ , height=UpperCamelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCamelCase__ )) , )
| 90
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__snake_case = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__snake_case = {'''vinai/bartpho-syllable''': 10_24}
class __snake_case ( A__ ):
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[str] = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ) -> None:
'''simple docstring'''
UpperCAmelCase : Dict =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
UpperCAmelCase : Optional[int] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
UpperCAmelCase : Any =vocab_file
UpperCAmelCase : Optional[int] =monolingual_vocab_file
UpperCAmelCase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase : Tuple ={}
UpperCAmelCase : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__A ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase : List[str] =cnt
cnt += 1
with open(__A , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
UpperCAmelCase : int =line.strip().split()[0]
UpperCAmelCase : int =len(self.fairseq_tokens_to_ids )
if str(__A ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase : Union[str, Any] =len(self.fairseq_tokens_to_ids )
UpperCAmelCase : List[str] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.__dict__.copy()
UpperCAmelCase : str =None
UpperCAmelCase : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , snake_case__ ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase : int ={}
UpperCAmelCase : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Tuple =[self.cls_token_id]
UpperCAmelCase : Any =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =[self.sep_token_id]
UpperCAmelCase : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self , snake_case__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__A , out_type=__A )
def UpperCAmelCase__ ( self , snake_case__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCAmelCase__ ( self , snake_case__ ) -> List[str]:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def UpperCAmelCase__ ( self , snake_case__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int ="""""".join(__A ).replace(__A , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : List[Any] =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase : Tuple =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , '''wb''' ) as fi:
UpperCAmelCase : int =self.sp_model.serialized_model_proto()
fi.write(__A )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__A ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __A )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__A , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__A )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 351
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__snake_case = threading.Lock()
__snake_case = None
__snake_case = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
__snake_case = logging.WARNING
__snake_case = True
def lowerCAmelCase_ ( )-> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =os.getenv('''TRANSFORMERS_VERBOSITY''' , __lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowerCAmelCase_ ( )-> str:
'''simple docstring'''
return __name__.split('''.''' )[0]
def lowerCAmelCase_ ( )-> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
UpperCAmelCase : Union[str, Any] =logging.StreamHandler() # Set sys.stderr as stream.
UpperCAmelCase : str =sys.stderr.flush
# Apply our default configuration to the library root logger.
UpperCAmelCase : List[Any] =_get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
UpperCAmelCase : Optional[int] =False
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
UpperCAmelCase : str =_get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
UpperCAmelCase : Optional[Any] =None
def lowerCAmelCase_ ( )-> Tuple:
'''simple docstring'''
return log_levels
def lowerCAmelCase_ ( __lowerCAmelCase = None )-> logging.Logger:
'''simple docstring'''
if name is None:
UpperCAmelCase : int =_get_library_name()
_configure_library_root_logger()
return logging.getLogger(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> int:
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def lowerCAmelCase_ ( __lowerCAmelCase )-> None:
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> Optional[int]:
'''simple docstring'''
return set_verbosity(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> Tuple:
'''simple docstring'''
return set_verbosity(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> Any:
'''simple docstring'''
return set_verbosity(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> Dict:
'''simple docstring'''
return set_verbosity(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def lowerCAmelCase_ ( __lowerCAmelCase )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__lowerCAmelCase )
def lowerCAmelCase_ ( __lowerCAmelCase )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
UpperCAmelCase : int =False
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
UpperCAmelCase : Tuple =True
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
UpperCAmelCase : List[Any] =_get_library_root_logger().handlers
for handler in handlers:
UpperCAmelCase : str =logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> None:
'''simple docstring'''
UpperCAmelCase : int =_get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__lowerCAmelCase )
def lowerCAmelCase_ ( self , *__lowerCAmelCase , **__lowerCAmelCase )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , __lowerCAmelCase )
if no_advisory_warnings:
return
self.warning(*__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = warning_advice
@functools.lru_cache(__lowerCAmelCase )
def lowerCAmelCase_ ( self , *__lowerCAmelCase , **__lowerCAmelCase )-> Optional[int]:
'''simple docstring'''
self.warning(*__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = warning_once
class __snake_case :
def __init__( self , *snake_case__ , **snake_case__ ) -> Dict: # pylint: disable=unused-argument
'''simple docstring'''
UpperCAmelCase : Any =args[0] if args else None
def __iter__( self ) -> List[Any]:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self , snake_case__ ) -> str:
'''simple docstring'''
def empty_fn(*snake_case__ , **snake_case__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> int:
'''simple docstring'''
return self
def __exit__( self , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
'''simple docstring'''
return
class __snake_case :
def __call__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*snake_case__ , **snake_case__ )
else:
return EmptyTqdm(*snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__snake_case = _tqdm_cls()
def lowerCAmelCase_ ( )-> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def lowerCAmelCase_ ( )-> Optional[Any]:
'''simple docstring'''
global _tqdm_active
UpperCAmelCase : Dict =True
hf_hub_utils.enable_progress_bars()
def lowerCAmelCase_ ( )-> Optional[Any]:
'''simple docstring'''
global _tqdm_active
UpperCAmelCase : List[str] =False
hf_hub_utils.disable_progress_bars()
| 78
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__A : Tuple = logging.get_logger(__name__)
__A : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A : Tuple = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__A : Optional[int] = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
__A : Optional[int] = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Any = RealmTokenizer
def __init__( self : str , A : Optional[int]=None , A : int=None , A : Union[str, Any]=True , A : str="[UNK]" , A : Dict="[SEP]" , A : Dict="[PAD]" , A : List[str]="[CLS]" , A : List[str]="[MASK]" , A : Dict=True , A : Union[str, Any]=None , **A : int , ) -> Optional[Any]:
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
lowercase_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A ) != tokenize_chinese_chars
):
lowercase_ : Tuple = getattr(A , normalizer_state.pop('''type''' ) )
lowercase_ : Optional[Any] = do_lower_case
lowercase_ : Optional[Any] = strip_accents
lowercase_ : Optional[Any] = tokenize_chinese_chars
lowercase_ : int = normalizer_class(**A )
lowercase_ : Tuple = do_lower_case
def A ( self : Union[str, Any] , A : int , **A : Dict ) -> List[Any]:
lowercase_ : Optional[Any] = PaddingStrategy.MAX_LENGTH
lowercase_ : Tuple = text
lowercase_ : List[str] = kwargs.pop('''text_pair''' , A )
lowercase_ : Optional[int] = kwargs.pop('''return_tensors''' , A )
lowercase_ : List[Any] = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(A ):
if batch_text_pair is not None:
lowercase_ : List[str] = batch_text_pair[idx]
else:
lowercase_ : Optional[int] = None
lowercase_ : Optional[Any] = super().__call__(A , A , return_tensors=A , **A )
lowercase_ : Tuple = encoded_candidates.get('''input_ids''' )
lowercase_ : List[Any] = encoded_candidates.get('''attention_mask''' )
lowercase_ : Dict = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(A )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(A )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(A )
lowercase_ : List[str] = {key: item for key, item in output_data.items() if len(A ) != 0}
return BatchEncoding(A , tensor_type=A )
def A ( self : Any , A : Union[str, Any] , A : Optional[int]=None ) -> List[str]:
lowercase_ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : List[str] , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
lowercase_ : List[Any] = [self.sep_token_id]
lowercase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : List[Any] , A : str , A : Optional[str] = None ) -> Tuple[str]:
lowercase_ : Optional[int] = self._tokenizer.model.save(A , name=A )
return tuple(A )
| 33
|
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0_0 ):
lowercase_ , lowercase_ : str = 1, 1
lowercase_ : List[str] = 2
while True:
lowercase_ : Tuple = 0
lowercase_ : List[Any] = fa + fa
lowercase_ , lowercase_ : Optional[int] = fa, f
index += 1
for _ in str(__snake_case ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33
| 1
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
__A : List[Any] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__A : Dict = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__A : str = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string"),
"prediction_text": datasets.features.Sequence(datasets.Value("string")),
},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
},
}) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def SCREAMING_SNAKE_CASE__ (self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple):
A = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
A = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
A = evaluate(dataset=__SCREAMING_SNAKE_CASE , predictions=__SCREAMING_SNAKE_CASE)
return score
| 370
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__A : Any = open # noqa: we just need to have a builtin inside this module to test it properly
| 57
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = '''dinat'''
_lowerCamelCase: str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Union[str, Any] ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=3 ,A_ : List[str]=64 ,A_ : List[str]=[3, 4, 6, 5] ,A_ : Any=[2, 4, 8, 16] ,A_ : List[Any]=7 ,A_ : Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,A_ : str=3.0 ,A_ : List[Any]=True ,A_ : Any=0.0 ,A_ : str=0.0 ,A_ : Union[str, Any]=0.1 ,A_ : int="gelu" ,A_ : List[str]=0.02 ,A_ : Optional[Any]=1e-5 ,A_ : Dict=0.0 ,A_ : int=None ,A_ : Optional[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]:
super().__init__(**A_ )
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = len(A_ )
A = num_heads
A = kernel_size
A = dilations
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = layer_norm_eps
A = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A = int(embed_dim * 2 ** (len(A_ ) - 1) )
A = layer_scale_init_value
A = ['stem'] + [F'stage{idx}' for idx in range(1 ,len(A_ ) + 1 )]
A , A = get_aligned_output_features_output_indices(
out_features=A_ ,out_indices=A_ ,stage_names=self.stage_names )
| 74
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
A: str = logging.get_logger(__name__)
A: List[Any] = {"vocab_file": "vocab.txt"}
A: List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
A: Dict = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def _snake_case ( UpperCamelCase : int ):
with open(UpperCamelCase , """r""" ) as f:
UpperCAmelCase : int = f.read().splitlines()
return [l.strip() for l in lines]
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : str = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<eos>" , **_SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = load_vocab_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : List[str] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Any = unk_token
UpperCAmelCase : str = cls_token
UpperCAmelCase : int = pad_token
UpperCAmelCase : Tuple = mask_token
UpperCAmelCase : str = eos_token
UpperCAmelCase : List[str] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
'''simple docstring'''
return len(self._id_to_token )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [self.cls_token_id]
UpperCAmelCase : Tuple = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : str = [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
if token_ids_a is not None:
mask += [0] * len(_SCREAMING_SNAKE_CASE ) + [1]
return mask
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write("""\n""".join(self.all_tokens ) )
return (vocab_file,)
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> int:
'''simple docstring'''
return super()._add_tokens(_SCREAMING_SNAKE_CASE , special_tokens=_SCREAMING_SNAKE_CASE )
| 109
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase : Tuple = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ['CLIPFeatureExtractor']
_lowercase : Any = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
|
"""simple docstring"""
from __future__ import annotations
import bisect
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int , snake_case_ :int = 0 , snake_case_ :int = -1 ):
if hi < 0:
__UpperCAmelCase = len(snake_case_ )
while lo < hi:
__UpperCAmelCase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__UpperCAmelCase = mid + 1
else:
__UpperCAmelCase = mid
return lo
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int , snake_case_ :int = 0 , snake_case_ :int = -1 ):
if hi < 0:
__UpperCAmelCase = len(snake_case_ )
while lo < hi:
__UpperCAmelCase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__UpperCAmelCase = mid + 1
else:
__UpperCAmelCase = mid
return lo
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int , snake_case_ :int = 0 , snake_case_ :int = -1 ):
sorted_collection.insert(bisect_left(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int , snake_case_ :int = 0 , snake_case_ :int = -1 ):
sorted_collection.insert(bisect_right(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int ):
__UpperCAmelCase = 0
__UpperCAmelCase = len(snake_case_ ) - 1
while left <= right:
__UpperCAmelCase = left + (right - left) // 2
__UpperCAmelCase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__UpperCAmelCase = midpoint - 1
else:
__UpperCAmelCase = midpoint + 1
return None
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int ):
__UpperCAmelCase = bisect.bisect_left(snake_case_ , snake_case_ )
if index != len(snake_case_ ) and sorted_collection[index] == item:
return index
return None
def lowercase__ ( snake_case_ :list[int] , snake_case_ :int , snake_case_ :int , snake_case_ :int ):
if right < left:
return None
__UpperCAmelCase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(snake_case_ , snake_case_ , snake_case_ , midpoint - 1 )
else:
return binary_search_by_recursion(snake_case_ , snake_case_ , midpoint + 1 , snake_case_ )
if __name__ == "__main__":
_lowercase : Optional[Any] = input('Enter numbers separated by comma:\n').strip()
_lowercase : Optional[int] = sorted(int(item) for item in user_input.split(','))
_lowercase : Optional[Any] = int(input('Enter a single number to be found in the list:\n'))
_lowercase : int = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 86
| 1
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 306
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
# intermediate weighs
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,)
if isinstance(weights[3] ,snake_case__ ):
_SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) )
_SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ )
# output layer norm
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# output embeddings
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
_SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(snake_case__ )
with open(snake_case__ ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )["""weights"""]
set_model_weights_in_torch(snake_case__ ,snake_case__ ,config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() ,snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 306
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__UpperCamelCase : Optional[Any] = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCAmelCase = 'lm_head'
lowerCAmelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ):
lowerCAmelCase = []
lowerCAmelCase = fairseq_model.state_dict()
lowerCAmelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase__ )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase__ )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase = 'weight'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ):
lowerCAmelCase = full_name.split('conv_layers.' )[-1]
lowerCAmelCase = name.split('.' )
lowerCAmelCase = int(items[0] )
lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCAmelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]=True ):
if config_path is not None:
lowerCAmelCase = UniSpeechConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCAmelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCAmelCase = Dictionary.load_from_json(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase = target_dict.pad_index
lowerCAmelCase = target_dict.bos_index
lowerCAmelCase = target_dict.eos_index
lowerCAmelCase = len(target_dict.symbols )
lowerCAmelCase = os.path.join(lowerCamelCase__ , 'vocab.json' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase = 42
lowerCAmelCase = 43
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase__ , )
lowerCAmelCase = True if config.feat_extract_norm == 'layer' else False
lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
lowerCAmelCase = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
lowerCAmelCase = UniSpeechForCTC(lowerCamelCase__ )
else:
lowerCAmelCase = UniSpeechForPreTraining(lowerCamelCase__ )
if is_finetuned:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCAmelCase = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
hf_unispeech.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase : Dict = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 350
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309
| 0
|
"""simple docstring"""
import math
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : Optional[int]=0 ) -> List[str]: # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 ,A_ )] for i in range(0 ,A_ )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 ,A_ )] for i in range(0 ,A_ )
] # dp[i][j] stores minimum distance from i to j
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[str] ,A_ : Any ,A_ : Any ) -> Optional[Any]:
A = w
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
for k in range(0 ,self.n ):
for i in range(0 ,self.n ):
for j in range(0 ,self.n ):
A = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> Optional[Any]:
return self.dp[u][v]
if __name__ == "__main__":
_lowercase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 74
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''gpt_bigcode'''
_lowerCamelCase: List[Any] = ['''past_key_values''']
_lowerCamelCase: int = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]:
A = vocab_size
A = n_positions
A = n_embd
A = n_layer
A = n_head
A = n_inner
A = activation_function
A = resid_pdrop
A = embd_pdrop
A = attn_pdrop
A = layer_norm_epsilon
A = initializer_range
A = scale_attn_weights
A = use_cache
A = attention_softmax_in_fpaa
A = scale_attention_softmax_in_fpaa
A = multi_query
A = bos_token_id
A = eos_token_id
super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
| 74
| 1
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _snake_case ( lowercase_ ):
def __init__( self , a__ , a__ , a__ , a__ , ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ = value_function
snake_case_ = unet
snake_case_ = scheduler
snake_case_ = env
snake_case_ = env.get_dataset()
snake_case_ = {}
for key in self.data.keys():
try:
snake_case_ = self.data[key].mean()
except: # noqa: E722
pass
snake_case_ = {}
for key in self.data.keys():
try:
snake_case_ = self.data[key].std()
except: # noqa: E722
pass
snake_case_ = env.observation_space.shape[0]
snake_case_ = env.action_space.shape[0]
def lowerCAmelCase__ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def lowerCAmelCase__ ( self , a__ , a__ ) -> str:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def lowerCAmelCase__ ( self , a__ ) -> Any:
'''simple docstring'''
if type(a__ ) is dict:
return {k: self.to_torch(a__ ) for k, v in x_in.items()}
elif torch.is_tensor(a__ ):
return x_in.to(self.unet.device )
return torch.tensor(a__ , device=self.unet.device )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Dict:
'''simple docstring'''
for key, val in cond.items():
snake_case_ = val.clone()
return x_in
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = x.shape[0]
snake_case_ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
snake_case_ = torch.full((batch_size,) , a__ , device=self.unet.device , dtype=torch.long )
for _ in range(a__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
snake_case_ = self.value_function(x.permute(0 , 2 , 1 ) , a__ ).sample
snake_case_ = torch.autograd.grad([y.sum()] , [x] )[0]
snake_case_ = self.scheduler._get_variance(a__ )
snake_case_ = torch.exp(0.5 * posterior_variance )
snake_case_ = model_std * grad
snake_case_ = 0
snake_case_ = x.detach()
snake_case_ = x + scale * grad
snake_case_ = self.reset_xa(a__ , a__ , self.action_dim )
snake_case_ = self.unet(x.permute(0 , 2 , 1 ) , a__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
snake_case_ = self.scheduler.step(a__ , a__ , a__ , predict_epsilon=a__ )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
snake_case_ = self.reset_xa(a__ , a__ , self.action_dim )
snake_case_ = self.to_torch(a__ )
return x, y
def __call__( self , a__ , a__=64 , a__=32 , a__=2 , a__=0.1 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.normalize(a__ , "observations" )
snake_case_ = obs[None].repeat(a__ , axis=0 )
snake_case_ = {0: self.to_torch(a__ )}
snake_case_ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
snake_case_ = randn_tensor(a__ , device=self.unet.device )
snake_case_ = self.reset_xa(a__ , a__ , self.action_dim )
snake_case_ = self.to_torch(a__ )
# run the diffusion process
snake_case_ , snake_case_ = self.run_diffusion(a__ , a__ , a__ , a__ )
# sort output trajectories by value
snake_case_ = y.argsort(0 , descending=a__ ).squeeze()
snake_case_ = x[sorted_idx]
snake_case_ = sorted_values[:, :, : self.action_dim]
snake_case_ = actions.detach().cpu().numpy()
snake_case_ = self.de_normalize(a__ , key="actions" )
# select the action with the highest value
if y is not None:
snake_case_ = 0
else:
# if we didn't run value guiding, select a random action
snake_case_ = np.random.randint(0 , a__ )
snake_case_ = denorm_actions[selected_index, 0]
return denorm_actions
| 92
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class _snake_case :
def __init__( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
snake_case_ = len(a__ ) - 1
def lowerCAmelCase__ ( self , a__ ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , a__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(a__ ) , 5 ) == 1
return output_values
def lowerCAmelCase__ ( self , a__ ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = self.basis_function(a__ )
snake_case_ = 0.0
snake_case_ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase__ ( self , a__ = 0.0_1 ) -> Any:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
snake_case_ = [] # x coordinates of points to plot
snake_case_ = [] # y coordinates of points to plot
snake_case_ = 0.0
while t <= 1:
snake_case_ = self.bezier_curve_function(a__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
snake_case_ = [i[0] for i in self.list_of_points]
snake_case_ = [i[1] for i in self.list_of_points]
plt.plot(
a__ , a__ , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(a__ , a__ , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 92
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
snake_case__ : Union[str, Any] = StableDiffusionXLImgaImgPipeline
snake_case__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
snake_case__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
snake_case__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowercase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
lowercase = CLIPTextModel(__lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCAmelCase )
lowercase = CLIPTextModelWithProjection(__lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCAmelCase )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image / 2 + 0.5
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = sd_pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_components()
lowercase = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
lowercase = sd_pipe.to(__lowerCAmelCase )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# forward without prompt embeds
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = 3 * ["""this is a negative prompt"""]
lowercase = negative_prompt
lowercase = 3 * [inputs["""prompt"""]]
lowercase = sd_pipe(**__lowerCAmelCase )
lowercase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = 3 * ["""this is a negative prompt"""]
lowercase = 3 * [inputs.pop("""prompt""" )]
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = sd_pipe.encode_prompt(__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
lowercase = sd_pipe(
**__lowerCAmelCase , prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , pooled_prompt_embeds=__lowerCAmelCase , negative_pooled_prompt_embeds=__lowerCAmelCase , )
lowercase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase="cpu" , __lowerCAmelCase=torch.floataa , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 64, 64) )
lowercase = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
lowercase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_inputs(__lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 197
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def _A () -> Generator[int, None, None]:
'''simple docstring'''
_a = {}
_a = 2
while True:
_a = factor_map.pop(lowerCAmelCase__ , lowerCAmelCase__ )
if factor:
_a = factor + prime
while x in factor_map:
x += factor
_a = factor
else:
_a = prime
yield prime
prime += 1
def _A (lowerCAmelCase__ :float = 1E10 ) -> int:
'''simple docstring'''
_a = sieve()
_a = 1
while True:
_a = next(lowerCAmelCase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowerCAmelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
| 168
| 0
|
from ..utils import DummyObject, requires_backends
class _a ( metaclass=_lowercase ):
UpperCamelCase = ['''torch''', '''scipy''']
def __init__( self : Union[str, Any], *lowerCAmelCase__ : Any, **lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self, ['''torch''', '''scipy'''] )
@classmethod
def snake_case ( cls : Tuple, *lowerCAmelCase__ : Optional[int], **lowerCAmelCase__ : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls, ['''torch''', '''scipy'''] )
@classmethod
def snake_case ( cls : List[str], *lowerCAmelCase__ : List[Any], **lowerCAmelCase__ : Any ) -> List[str]:
'''simple docstring'''
requires_backends(cls, ['''torch''', '''scipy'''] )
| 360
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 128
| 0
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCamelCase = 16
_UpperCamelCase = 32
def lowercase_ ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__UpperCAmelCase : List[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowerCAmelCase__ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase : Optional[Any] = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase : Optional[int] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase : int = 16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase : Tuple = 8
else:
__UpperCAmelCase : str = None
return tokenizer.pad(
lowerCAmelCase__ , padding="""longest""" , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
__UpperCAmelCase : Dict = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
__UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCamelCase = mocked_dataloaders # noqa: F811
def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCAmelCase__ ) == "1":
__UpperCAmelCase : Optional[Any] = 2
# Initialize accelerator
__UpperCAmelCase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase : Any = config["""lr"""]
__UpperCAmelCase : str = int(config["""num_epochs"""] )
__UpperCAmelCase : Optional[Any] = int(config["""seed"""] )
__UpperCAmelCase : Union[str, Any] = int(config["""batch_size"""] )
__UpperCAmelCase : Optional[int] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase__ )
def inner_training_loop(lowerCAmelCase__ : str ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase : Tuple = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase : str = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate scheduler
__UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase : List[str] = model(**lowerCAmelCase__ )
__UpperCAmelCase : List[str] = outputs.loss
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase : int = model(**lowerCAmelCase__ )
__UpperCAmelCase : str = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
__UpperCAmelCase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowerCAmelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowercase_ ( ):
"""simple docstring"""
__UpperCAmelCase : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__UpperCAmelCase : List[str] = parser.parse_args()
__UpperCAmelCase : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 254
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
_UpperCamelCase = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class _A ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : str = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : Optional[Any] = TaTokenizer
_SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=100 , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : List[Any] = [f'<extra_id_{i}>' for i in range(__UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__UpperCAmelCase : Any = len(set(filter(lambda __UpperCAmelCase : bool("""extra_id_""" in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCAmelCase : Optional[int] = vocab_file
__UpperCAmelCase : Any = False if not self.vocab_file else True
__UpperCAmelCase : Optional[int] = extra_ids
@staticmethod
def __A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__UpperCAmelCase : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCAmelCase , )
return max_model_length
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : Any = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
logger.info(f'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__UpperCAmelCase : Optional[Any] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self ) -> Any:
'''simple docstring'''
return list(
set(filter(lambda __UpperCAmelCase : bool(re.search(r"""<extra_id_\d+>""" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return [self.convert_tokens_to_ids(__UpperCAmelCase ) for token in self.get_sentinel_tokens()]
| 254
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''ViTFeatureExtractor''']
__UpperCamelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 370
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 312
| 0
|
"""simple docstring"""
from collections import defaultdict
def _snake_case ( _snake_case : int ) -> int:
'''simple docstring'''
_A = 1
_A = True
for v in tree[start]:
if v not in visited:
ret += dfs(_snake_case )
if ret % 2 == 0:
cuts.append(_snake_case )
return ret
def _snake_case ( ) -> str:
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
a , a = 10, 9
a = defaultdict(list)
a = {}
a = []
a = 0
a = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 315
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 315
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class _snake_case ( a__ ):
def __init__( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : List[str] , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 100 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : bool = True , ):
if audio_length_in_s is None:
__lowerCamelCase : Optional[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
__lowerCamelCase : List[Any] = audio_length_in_s * self.unet.config.sample_rate
__lowerCamelCase : Tuple = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
__lowerCamelCase : List[str] = int(UpperCAmelCase )
if sample_size % down_scale_factor != 0:
__lowerCamelCase : Any = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
" process." )
__lowerCamelCase : Optional[int] = int(UpperCAmelCase )
__lowerCamelCase : str = next(iter(self.unet.parameters() ) ).dtype
__lowerCamelCase : Tuple = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__lowerCamelCase : List[str] = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase , device=audio.device )
__lowerCamelCase : Tuple = self.scheduler.timesteps.to(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__lowerCamelCase : Tuple = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
__lowerCamelCase : Union[str, Any] = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCamelCase : Tuple = audio.clamp(-1 , 1 ).float().cpu().numpy()
__lowerCamelCase : Union[str, Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=UpperCAmelCase )
| 64
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = DiTPipeline
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def lowerCamelCase__ ( self : Tuple ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=UpperCAmelCase , )
__lowerCamelCase : List[str] = AutoencoderKL()
__lowerCamelCase : List[Any] = DDIMScheduler()
__lowerCamelCase : Optional[Any] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : List[str] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : List[str] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : str = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = "cpu"
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : List[Any] = pipe(**UpperCAmelCase ).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCamelCase : Optional[int] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
__lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def lowerCamelCase__ ( self : Any ):
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__lowerCamelCase : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
__lowerCamelCase : Optional[int] = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Dict = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Tuple = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__lowerCamelCase : Union[str, Any] = ["vase", "umbrella"]
__lowerCamelCase : int = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Dict = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 64
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = 'gpt_neo'
UpperCAmelCase__ : Union[str, Any] = ['past_key_values']
UpperCAmelCase__ : int = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any]=5_02_57 , UpperCamelCase_: Any=20_48 , UpperCamelCase_: Optional[Any]=20_48 , UpperCamelCase_: Any=24 , UpperCamelCase_: int=[[["global", "local"], 12]] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: str=None , UpperCamelCase_: Any=2_56 , UpperCamelCase_: List[str]="gelu_new" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: List[Any]=1E-5 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: int=True , UpperCamelCase_: Optional[Any]=5_02_56 , UpperCamelCase_: Dict=5_02_56 , **UpperCamelCase_: int , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_layers
__lowerCamelCase = num_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = window_size
__lowerCamelCase = activation_function
__lowerCamelCase = resid_dropout
__lowerCamelCase = embed_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = classifier_dropout
__lowerCamelCase = layer_norm_epsilon
__lowerCamelCase = initializer_range
__lowerCamelCase = use_cache
__lowerCamelCase = bos_token_id
__lowerCamelCase = eos_token_id
__lowerCamelCase = attention_types
__lowerCamelCase = self.expand_attention_types_params(UpperCamelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Tuple ):
__lowerCamelCase = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCamelCase__ ( A__ : int , A__ : Optional[Any] , A__ : Optional[int] , A__ : List[Any] ):
'''simple docstring'''
import torch
__lowerCamelCase = input.size()
__lowerCamelCase = len(A__ )
__lowerCamelCase = shape[dimension]
__lowerCamelCase = torch.arange(0 , A__ , A__ )
__lowerCamelCase = torch.div(sizedim - size , A__ , rounding_mode="""floor""" ) + 1
__lowerCamelCase = torch.arange(A__ ) + low_indices[:min_length][:, None]
__lowerCamelCase = [slice(A__ )] * rank
__lowerCamelCase = indices
__lowerCamelCase = input[s]
__lowerCamelCase = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A__ )
def lowerCamelCase__ ( A__ : Optional[int] , A__ : List[str] ):
'''simple docstring'''
import torch
__lowerCamelCase = torch.arange(1 , A__ )
__lowerCamelCase = torch.remainder(A__ , A__ )
__lowerCamelCase = remainders == 0
__lowerCamelCase = candidates[divisor_indices]
__lowerCamelCase = torch.max(A__ )
return largest_divisor, torch.div(A__ , A__ , rounding_mode="""floor""" )
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction="""inputs""" )
__lowerCamelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCAmelCase__ ( self: Dict ):
return self._config.num_heads
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[TensorType] = None , ):
__lowerCamelCase = super(UpperCamelCase_ , self ).generate_dummy_inputs(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
# We need to order the input in the way they appears in the forward()
__lowerCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCamelCase, __lowerCamelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCamelCase = seqlen + 2
__lowerCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCamelCase = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(self.num_layers )
]
__lowerCamelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCamelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCamelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return 13
| 12
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
| 12
| 1
|
import colorsys
from PIL import Image # type: ignore
def UpperCamelCase__( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : int ):
A__ = x
A__ = y
for step in range(lowercase__ ): # noqa: B007
A__ = a * a - b * b + x
A__ = 2 * a * b + y
A__ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def UpperCamelCase__( UpperCamelCase__ : float ):
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def UpperCamelCase__( UpperCamelCase__ : float ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(lowercase__ , 1 , 1 ) )
def UpperCamelCase__( UpperCamelCase__ : int = 8_00 , UpperCamelCase__ : int = 6_00 , UpperCamelCase__ : float = -0.6 , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 3.2 , UpperCamelCase__ : int = 50 , UpperCamelCase__ : bool = True , ):
A__ = Image.new('''RGB''' , (image_width, image_height) )
A__ = img.load()
# loop through the image-coordinates
for image_x in range(lowercase__ ):
for image_y in range(lowercase__ ):
# determine the figure-coordinates based on the image-coordinates
A__ = figure_width / image_width * image_height
A__ = figure_center_x + (image_x / image_width - 0.5) * figure_width
A__ = figure_center_y + (image_y / image_height - 0.5) * figure_height
A__ = get_distance(lowercase__ , lowercase__ , lowercase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
A__ = get_color_coded_rgb(lowercase__ )
else:
A__ = get_black_and_white_rgb(lowercase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a__: List[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 351
|
# Algorithm for the pigeonhole sorting
def UpperCamelCase__( UpperCamelCase__ : int )->str:
A__ = min(UpperCamelCase__ ) # min() finds the minimum value
A__ = max(UpperCamelCase__ ) # max() finds the maximum value
A__ = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
A__ = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
A__ = 0
for count in range(UpperCamelCase__ ):
while holes[count] > 0:
holes[count] -= 1
A__ = count + min_val
i += 1
def UpperCamelCase__( )->Tuple:
A__ = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(UpperCamelCase__ )
print('''Sorted order is:''' , ''' '''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
| 39
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__lowerCamelCase , )
assert hasattr(self , '''env''' )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Dict = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
__A : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version='''py36''' , )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
TrainingJobAnalytics(__lowerCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : List[str] = self.create_estimator(__lowerCamelCase )
# run training
estimator.fit()
# result dataframe
__A : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__A : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__A : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__A : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __lowerCamelCase )
| 179
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
a_ = logging.getLogger(__name__)
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """masked_bert"""
def __init__( self , __lowerCamelCase=3_0522 , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=512 , __lowerCamelCase=2 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-1_2 , __lowerCamelCase=0 , __lowerCamelCase="topK" , __lowerCamelCase="constant" , __lowerCamelCase=0.0 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
__A : Dict = vocab_size
__A : Union[str, Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : Tuple = num_attention_heads
__A : Optional[Any] = hidden_act
__A : List[str] = intermediate_size
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : Any = max_position_embeddings
__A : str = type_vocab_size
__A : List[Any] = initializer_range
__A : str = layer_norm_eps
__A : Optional[int] = pruning_method
__A : str = mask_init
__A : Any = mask_scale
| 179
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Tuple=13 , lowerCamelCase : Dict=30 , lowerCamelCase : Dict=2 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : str=32 , lowerCamelCase : Any=5 , lowerCamelCase : int=4 , lowerCamelCase : List[str]=37 , lowerCamelCase : Any="gelu" , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : str=10 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : List[str]=3 , lowerCamelCase : Union[str, Any]=0.6 , lowerCamelCase : List[Any]=None , ) -> Optional[int]:
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : int = num_channels
lowerCAmelCase_ : Any = is_training
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Optional[Any] = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : List[Any] = type_sequence_label_size
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : List[str] = mask_ratio
lowerCAmelCase_ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase_ : Union[str, Any] = (image_size // patch_size) ** 2
lowerCAmelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowercase ( self : Optional[int] ) -> str:
lowerCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ) -> Optional[int]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowercase ( self : Any , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ) -> Tuple:
lowerCAmelCase_ : Tuple = ViTMAEModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Dict = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] ) -> Dict:
lowerCAmelCase_ : Tuple = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(lowerCamelCase )
lowerCAmelCase_ : int = (self.image_size // self.patch_size) ** 2
lowerCAmelCase_ : int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : List[str] = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ : Tuple = model(lowerCamelCase )
lowerCAmelCase_ : List[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowercase ( self : Optional[int] ) -> str:
lowerCAmelCase_ : Any = self.prepare_config_and_inputs()
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[Any] = config_and_inputs
lowerCAmelCase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def __lowercase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = ViTMAEModelTester(self )
lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def __lowercase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def __lowercase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowercase ( self : List[str] ) -> Tuple:
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[str] = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def __lowercase ( self : Optional[Any] ) -> Any:
lowerCAmelCase_, lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = model_class(lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Any = [*signature.parameters.keys()]
lowerCAmelCase_ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __lowercase ( self : Tuple ) -> str:
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> str:
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
def __lowercase ( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ) -> str:
# make masks reproducible
np.random.seed(2 )
lowerCAmelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCAmelCase_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase_ : int = pt_noise
super().check_pt_tf_models(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __lowercase ( self : int ) -> Dict:
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase_ : Any = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
lowerCAmelCase_ : Any = outputs[0].cpu().numpy()
lowerCAmelCase_ : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowerCAmelCase_ : int = model_class.from_pretrained(lowerCamelCase )
model.to(lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase_ : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
# Make sure we don't have nans
lowerCAmelCase_ : Optional[Any] = after_outputs[0].cpu().numpy()
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __lowercase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __lowercase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __lowercase ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def __lowercase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowercase ( self : List[Any] ) -> str:
pass
@slow
def __lowercase ( self : List[str] ) -> List[Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[Any] = ViTMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def __lowercase ( self : int ) -> List[Any]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCAmelCase_ : Dict = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = self.default_image_processor
lowerCAmelCase_ : Union[str, Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase_ : Optional[int] = ViTMAEConfig()
lowerCAmelCase_ : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCAmelCase_ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : str = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) )
# verify the logits
lowerCAmelCase_ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
lowerCAmelCase_ : str = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1E-4 ) )
| 89
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __UpperCamelCase ( _A : Dict ) ->Optional[int]:
"""simple docstring"""
if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(_A , """_dynamo""" ):
return False
return isinstance(_A , torch._dynamo.eval_frame.OptimizedModule )
def __UpperCamelCase ( _A : str , _A : bool = True ) ->Tuple:
"""simple docstring"""
lowerCamelCase_ =(torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowerCamelCase_ =is_compiled_module(_A )
if is_compiled:
lowerCamelCase_ =model
lowerCamelCase_ =model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_A , _A ):
lowerCamelCase_ =model.module
if not keep_fpaa_wrapper:
lowerCamelCase_ =getattr(_A , """forward""" )
lowerCamelCase_ =model.__dict__.pop("""_original_forward""" , _A )
if original_forward is not None:
while hasattr(_A , """__wrapped__""" ):
lowerCamelCase_ =forward.__wrapped__
if forward == original_forward:
break
lowerCamelCase_ =forward
if getattr(_A , """_converted_to_transformer_engine""" , _A ):
convert_model(_A , to_transformer_engine=_A )
if is_compiled:
lowerCamelCase_ =model
lowerCamelCase_ =compiled_model
return model
def __UpperCamelCase ( ) ->List[str]:
"""simple docstring"""
PartialState().wait_for_everyone()
def __UpperCamelCase ( _A : Optional[Any] , _A : str ) ->Optional[int]:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_A , _A )
elif PartialState().local_process_index == 0:
torch.save(_A , _A )
@contextmanager
def __UpperCamelCase ( **_A : Optional[Any] ) ->int:
"""simple docstring"""
for key, value in kwargs.items():
lowerCamelCase_ =str(_A )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __UpperCamelCase ( _A : Optional[Any] ) ->Dict:
"""simple docstring"""
if not hasattr(_A , """__qualname__""" ) and not hasattr(_A , """__name__""" ):
lowerCamelCase_ =getattr(_A , """__class__""" , _A )
if hasattr(_A , """__qualname__""" ):
return obj.__qualname__
if hasattr(_A , """__name__""" ):
return obj.__name__
return str(_A )
def __UpperCamelCase ( _A : Dict , _A : Any ) ->Optional[int]:
"""simple docstring"""
for key, value in source.items():
if isinstance(_A , _A ):
lowerCamelCase_ =destination.setdefault(_A , {} )
merge_dicts(_A , _A )
else:
lowerCamelCase_ =value
return destination
def __UpperCamelCase ( _A : int = None ) ->bool:
"""simple docstring"""
if port is None:
lowerCamelCase_ =29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("""localhost""", port) ) == 0
| 154
|
from __future__ import annotations
from random import choice
def __UpperCamelCase ( _A : str ) ->int:
"""simple docstring"""
return choice(_A )
def __UpperCamelCase ( _A : list[int] , _A : int ) ->int:
"""simple docstring"""
lowerCamelCase_ =random_pivot(_A )
# partition based on pivot
# linear time
lowerCamelCase_ =[e for e in lst if e < pivot]
lowerCamelCase_ =[e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_A ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_A ) < k - 1:
return kth_number(_A , k - len(_A ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_A , _A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self, __magic_name__=None, __magic_name__=None, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
super().__init__(*__lowercase, **__lowercase )
if config is None:
assert isinstance(self.model, __lowercase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
UpperCamelCase__ : List[str] = self.model.config
else:
UpperCamelCase__ : Optional[int] = config
UpperCamelCase__ : str = data_args
UpperCamelCase__ : List[Any] = self.config.tgt_vocab_size if isinstance(self.config, __lowercase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
''' padding..''' )
if self.args.label_smoothing == 0:
UpperCamelCase__ : Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCamelCase__ : Optional[Any] = label_smoothed_nll_loss
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
if self.optimizer is None:
UpperCamelCase__ : Any = ['''bias''', '''LayerNorm.weight''']
UpperCamelCase__ : List[str] = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
UpperCamelCase__ : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCamelCase__ : Any = Adafactor
UpperCamelCase__ : Dict = {'''scale_parameter''': False, '''relative_step''': False}
else:
UpperCamelCase__ : int = AdamW
UpperCamelCase__ : Dict = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
UpperCamelCase__ : int = self.args.learning_rate
if self.sharded_ddp:
UpperCamelCase__ : Any = OSS(
params=__lowercase, optim=__lowercase, **__lowercase, )
else:
UpperCamelCase__ : str = optimizer_cls(__lowercase, **__lowercase )
if self.lr_scheduler is None:
UpperCamelCase__ : List[str] = self._get_lr_scheduler(__lowercase )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def UpperCamelCase__ ( self, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Dict = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCamelCase__ : List[str] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCamelCase__ : List[Any] = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps )
else:
UpperCamelCase__ : str = schedule_func(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=__lowercase )
return scheduler
def UpperCamelCase__ ( self ) -> Optional[torch.utils.data.Sampler]:
"""simple docstring"""
if isinstance(self.train_dataset, torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCamelCase__ : str = model(**__lowercase, use_cache=__lowercase )[0]
UpperCamelCase__ : Any = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) )
else:
# compute usual loss via models
UpperCamelCase__ : Optional[int] = model(**__lowercase, labels=__lowercase, use_cache=__lowercase )[:2]
else:
# compute label smoothed loss
UpperCamelCase__ : List[str] = model(**__lowercase, use_cache=__lowercase )[0]
UpperCamelCase__ : Optional[int] = torch.nn.functional.log_softmax(__lowercase, dim=-1 )
UpperCamelCase__ : List[str] = self.loss_fn(__lowercase, __lowercase, self.args.label_smoothing, ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = inputs.pop('''labels''' )
UpperCamelCase__ : int = self._compute_loss(__lowercase, __lowercase, __lowercase )
return loss
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self._prepare_inputs(__lowercase )
UpperCamelCase__ : str = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCamelCase__ : Dict = self.model.generate(
inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], **__lowercase, )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCamelCase__ : Any = self._pad_tensors_to_max_len(__lowercase, gen_kwargs['''max_length'''] )
UpperCamelCase__ : str = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
UpperCamelCase__ : Optional[Any] = self._compute_loss(__lowercase, __lowercase, __lowercase )
UpperCamelCase__ : List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCamelCase__ : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCamelCase__ : Any = self._pad_tensors_to_max_len(__lowercase, gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> Tuple:
"""simple docstring"""
# If PAD token is not defined at least EOS token has to be defined
UpperCamelCase__ : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f" padded to `max_length`={max_length}" )
UpperCamelCase__ : Optional[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
UpperCamelCase__ : List[Any] = tensor
return padded_tensor
| 367
|
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> list[int]:
if length <= 0 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(__UpperCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 247
| 0
|
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ):
raise TypeError('only integers accepted as input' )
else:
snake_case_ : Any = str(abs(__a ) )
snake_case_ : List[Any] = [list(__a ) for char in range(len(__a ) )]
for index in range(len(__a ) ):
num_transpositions[index].pop(__a )
return max(
int(''.join(list(__a ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 327
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self : Union[str, Any] , _A : Any , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = params
snake_case_ : int = np.array(_A )
snake_case_ : Optional[int] = np.array([len(_A ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Tuple , _A : Optional[int] ) -> str:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : List[str] ) -> str:
"""simple docstring"""
return len(self.lengths )
def UpperCAmelCase_ ( self : Dict ) -> str:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Dict = self.params.max_model_input_size
snake_case_ : Tuple = self.lengths > max_len
logger.info(F"""Splitting {sum(_A )} too long sequences.""" )
def divide_chunks(_A : Union[str, Any] , _A : Dict ):
return [l[i : i + n] for i in range(0 , len(_A ) , _A )]
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = []
if self.params.mlm:
snake_case_ ,snake_case_ : Optional[int] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
snake_case_ ,snake_case_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
snake_case_ : List[Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
snake_case_ : Optional[int] = np.insert(_A , 0 , _A )
if sub_s[-1] != sep_id:
snake_case_ : Optional[Any] = np.insert(_A , len(_A ) , _A )
assert len(_A ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_A )
new_tok_ids.extend(_A )
new_lengths.extend([len(_A ) for l in sub_seqs] )
snake_case_ : Tuple = np.array(_A )
snake_case_ : int = np.array(_A )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
snake_case_ : Tuple = len(self )
snake_case_ : int = self.lengths > 11
snake_case_ : Dict = self.token_ids[indices]
snake_case_ : int = self.lengths[indices]
snake_case_ : List[Any] = len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ : Optional[Any] = self.params.special_tok_ids['unk_token']
snake_case_ : Dict = len(self )
snake_case_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
snake_case_ : Any = (unk_occs / self.lengths) < 0.5
snake_case_ : List[Any] = self.token_ids[indices]
snake_case_ : int = self.lengths[indices]
snake_case_ : Tuple = len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : Any = [t[0] for t in batch]
snake_case_ : int = [t[1] for t in batch]
assert len(_A ) == len(_A )
# Max for paddings
snake_case_ : str = max(_A )
# Pad token ids
if self.params.mlm:
snake_case_ : int = self.params.special_tok_ids['pad_token']
else:
snake_case_ : Dict = self.params.special_tok_ids['unk_token']
snake_case_ : Dict = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids]
assert len(tk_ ) == len(_A )
assert all(len(_A ) == max_seq_len_ for t in tk_ )
snake_case_ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ : Optional[Any] = torch.tensor(_A ) # (bs)
return tk_t, lg_t
| 327
| 1
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a=None , **_a ):
"""simple docstring"""
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
lowerCamelCase = model
lowerCamelCase = kwargs.get("""model_save_dir""" , _a )
lowerCamelCase = kwargs.get("""latest_model_name""" , _a )
def __call__( self , **_a ):
"""simple docstring"""
lowerCamelCase = {k: np.array(_a ) for k, v in kwargs.items()}
return self.model.run(_a , _a )
@staticmethod
def _lowerCAmelCase ( _a , _a=None , _a=None ):
"""simple docstring"""
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
lowerCamelCase = """CPUExecutionProvider"""
return ort.InferenceSession(_a , providers=[provider] , sess_options=_a )
def _lowerCAmelCase ( self , _a , _a = None , **_a ):
"""simple docstring"""
lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
lowerCamelCase = Path(_a ).joinpath(_a )
try:
shutil.copyfile(_a , _a )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCamelCase = self.model_save_dir.joinpath(_a )
if src_path.exists():
lowerCamelCase = Path(_a ).joinpath(_a )
try:
shutil.copyfile(_a , _a )
except shutil.SameFileError:
pass
def _lowerCAmelCase ( self , _a , **_a , ):
"""simple docstring"""
if os.path.isfile(_a ):
logger.error(f'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(_a , exist_ok=_a )
# saving model weights/files
self._save_pretrained(_a , **_a )
@classmethod
def _lowerCAmelCase ( cls , _a , _a = None , _a = None , _a = False , _a = None , _a = None , _a = None , _a = None , **_a , ):
"""simple docstring"""
lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_a ):
lowerCamelCase = OnnxRuntimeModel.load_model(
os.path.join(_a , _a ) , provider=_a , sess_options=_a )
lowerCamelCase = Path(_a )
# load model from hub
else:
# download model
lowerCamelCase = hf_hub_download(
repo_id=_a , filename=_a , use_auth_token=_a , revision=_a , cache_dir=_a , force_download=_a , )
lowerCamelCase = Path(_a ).parent
lowerCamelCase = Path(_a ).name
lowerCamelCase = OnnxRuntimeModel.load_model(_a , provider=_a , sess_options=_a )
return cls(model=_a , **_a )
@classmethod
def _lowerCAmelCase ( cls , _a , _a = True , _a = None , _a = None , **_a , ):
"""simple docstring"""
lowerCamelCase = None
if len(str(_a ).split("""@""" ) ) == 2:
lowerCamelCase , lowerCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_a , revision=_a , cache_dir=_a , force_download=_a , use_auth_token=_a , **_a , )
| 360
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
lowerCamelCase = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_a )
return config
def _lowerCAmelCase ( self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase = self.dummy_model()
lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase = scheduler.scale_model_input(_a , _a )
lowerCamelCase = model(_a , _a )
lowerCamelCase = scheduler.step(_a , _a , _a )
lowerCamelCase = output.prev_sample
lowerCamelCase = torch.sum(torch.abs(_a ) )
lowerCamelCase = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def _lowerCAmelCase ( self ):
"""simple docstring"""
if torch_device == "mps":
return
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config()
lowerCamelCase = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase = self.dummy_model()
lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase = scheduler.scale_model_input(_a , _a )
lowerCamelCase = model(_a , _a )
lowerCamelCase = scheduler.step(_a , _a , _a )
lowerCamelCase = output.prev_sample
lowerCamelCase = torch.sum(torch.abs(_a ) )
lowerCamelCase = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def _lowerCAmelCase ( self ):
"""simple docstring"""
if torch_device == "mps":
return
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config()
lowerCamelCase = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
lowerCamelCase = self.dummy_model()
lowerCamelCase = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase = scheduler.scale_model_input(_a , _a )
lowerCamelCase = model(_a , _a )
lowerCamelCase = scheduler.step(_a , _a , _a )
lowerCamelCase = output.prev_sample
lowerCamelCase = torch.sum(torch.abs(_a ) )
lowerCamelCase = torch.mean(torch.abs(_a ) )
if str(_a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 168
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 283
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2
lowerCamelCase : int = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 )
if "l" in remove_borders:
lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) )
return result
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ )
return tile
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = n % d
return n - divisor
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ):
"""simple docstring"""
super().__init__(
vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase : Tuple = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size )
lowerCamelCase : List[str] = image.crop(__A )
lowerCamelCase : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowerCamelCase : int = translated_slice_x - (original_image_slice / 2)
lowerCamelCase : Optional[Any] = max(0 , __A )
lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A )
lowerCamelCase : Dict = to_input.size
lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0]
lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A )
lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowerCamelCase : int = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
lowerCamelCase : int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode="L" , )
final_image.paste(
__A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A )
@torch.no_grad()
def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ):
"""simple docstring"""
lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size )
lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size )
lowerCamelCase : str = tcx * tcy
lowerCamelCase : int = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa )
lowerCamelCase : Optional[Any] = pipe.to("cuda" )
lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(SCREAMING_SNAKE_CASE_ ):
print(f"""progress: {obj['progress']:.4f}""" )
obj["image"].save("diffusers_library_progress.jpg" )
lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 283
| 1
|
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple ) -> Optional[int]:
A : List[Any] = {}
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> None:
print(self.vertex )
for i in self.vertex:
print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase ) for j in self.vertex[i]] ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowerCamelCase )
else:
# else make a new vertex
A : List[str] = [to_vertex]
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> None:
# visited array for storing already visited nodes
A : str = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : list ) -> None:
# mark start vertex as visited
A : List[Any] = True
print(__lowerCamelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 256
|
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> Optional[Any]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] ) -> Optional[int]:
A : Any = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
A : int = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None ) -> List[str]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
| 256
| 1
|
'''simple docstring'''
from math import ceil
def _UpperCamelCase ( __A , __A ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = list(range(0 , __A ) )
UpperCamelCase__ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
UpperCamelCase__ = []
for i in device_map_blocks:
if device_map_blocks.count(__A ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__A )
# Missing blocks
UpperCamelCase__ = [i for i in blocks if i not in device_map_blocks]
UpperCamelCase__ = [i for i in device_map_blocks if i not in blocks]
if len(__A ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(__A ) )
if len(__A ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(__A ) )
if len(__A ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(__A ) )
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = list(range(__A ) )
UpperCamelCase__ = int(ceil(n_layers / len(__A ) ) )
UpperCamelCase__ = [layers[i : i + n_blocks] for i in range(0 , __A , __A )]
return dict(zip(__A , __A ) )
| 80
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __SCREAMING_SNAKE_CASE (*SCREAMING_SNAKE_CASE__ ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = list(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 128 ):
if function is None:
return functools.partial(SCREAMING_SNAKE_CASE__ , starting_batch_size=SCREAMING_SNAKE_CASE__ )
snake_case_ = starting_batch_size
def decorator(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
snake_case_ = list(inspect.signature(SCREAMING_SNAKE_CASE__ ).parameters.keys() )
# Guard against user error
if len(SCREAMING_SNAKE_CASE__ ) < (len(SCREAMING_SNAKE_CASE__ ) + 1):
snake_case_ = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
except Exception as e:
if should_reduce_batch_size(SCREAMING_SNAKE_CASE__ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
| 8
| 0
|
from collections.abc import Callable
def UpperCamelCase ( __magic_name__ : Callable[[float], float] , __magic_name__ : float , __magic_name__ : float ) -> float:
"""simple docstring"""
lowercase__ = a
lowercase__ = b
if function(__magic_name__ ) == 0: # one of the a or b is a root for the function
return a
elif function(__magic_name__ ) == 0:
return b
elif (
function(__magic_name__ ) * function(__magic_name__ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
lowercase__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(__magic_name__ ) == 0:
return mid
elif function(__magic_name__ ) * function(__magic_name__ ) < 0:
lowercase__ = mid
else:
lowercase__ = mid
lowercase__ = start + (end - start) / 2.0
return mid
def UpperCamelCase ( __magic_name__ : float ) -> float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 363
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A :
'''simple docstring'''
def __init__(self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Any=13 , _UpperCAmelCase : int=64 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Any=10 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=[1, 16, 4, 4] , _UpperCAmelCase : str=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowercase__ = (self.image_size // 32) ** 2
lowercase__ = num_patches + 1
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCAmelCase , )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTHybridModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTHybridForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
lowercase__ = ViTHybridModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowerCamelCase__ (self : Dict ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase__ (self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowerCamelCase__ (self : Dict ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowercase__ = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTHybridModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
lowercase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__ = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowercase__ = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = model(**_UpperCAmelCase )
lowercase__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowercase__ = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 146
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class A__ ( __magic_name__ ):
lowercase = 'xlm-roberta'
def __init__( self : int , a : List[str]=30_522 , a : str=768 , a : str=12 , a : str=12 , a : str=3_072 , a : Tuple="gelu" , a : str=0.1 , a : Union[str, Any]=0.1 , a : Any=512 , a : int=2 , a : Any=0.0_2 , a : List[Any]=1E-12 , a : Union[str, Any]=1 , a : Dict=0 , a : Any=2 , a : str="absolute" , a : Union[str, Any]=True , a : int=None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowerCAmelCase__ : str = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Dict = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : str = layer_norm_eps
lowerCAmelCase__ : Any = position_embedding_type
lowerCAmelCase__ : List[Any] = use_cache
lowerCAmelCase__ : str = classifier_dropout
class A__ ( __magic_name__ ):
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 212
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = MvpTokenizer
lowercase = MvpTokenizerFast
lowercase = True
lowercase = filter_roberta_detectors
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase__ : Any = {'unk_token': '<unk>'}
lowerCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a ) )
def _lowerCamelCase ( self : str , **a : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def _lowerCamelCase ( self : Dict , **a : Optional[int] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a )
def _lowerCamelCase ( self : Tuple , a : Dict ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase__ : List[Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase__ : int = tokenizer(a , max_length=len(a ) , padding=a , return_tensors='pt' )
self.assertIsInstance(a , a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCAmelCase__ : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(a , a )
# Test that special tokens are reset
@require_torch
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase__ : Any = tokenizer(a , padding=a , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , a )
self.assertIn('attention_mask' , a )
self.assertNotIn('labels' , a )
self.assertNotIn('decoder_attention_mask' , a )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase__ : Tuple = tokenizer(text_target=a , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase__ : str = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] , padding=a , truncation=a , return_tensors='pt' )
self.assertIsInstance(a , a )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ['A long paragraph for summarization.']
lowerCAmelCase__ : Any = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase__ : List[Any] = tokenizer(a , text_target=a , return_tensors='pt' )
lowerCAmelCase__ : Optional[int] = inputs['input_ids']
lowerCAmelCase__ : str = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : str = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Optional[int] = 'A, <mask> AllenNLP sentence.'
lowerCAmelCase__ : int = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
lowerCAmelCase__ : Optional[int] = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCAmelCase__ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCAmelCase__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 212
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( _lowercase , unittest.TestCase):
# TODO: is there an appropriate internal test set?
snake_case__ : List[str] = "ssube/stable-diffusion-x4-upscaler-onnx"
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int=0 ):
"""simple docstring"""
_lowerCamelCase : Tuple = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
_lowerCamelCase : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCamelCase : Any = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = self.get_dummy_inputs()
_lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.get_dummy_inputs()
_lowerCamelCase : str = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCamelCase : Tuple = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Union[str, Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCamelCase : List[Any] = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase):
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ort.SessionOptions()
_lowerCamelCase : List[str] = False
return options
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_lowerCamelCase : Any = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
_lowerCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = '''A fantasy landscape, trending on artstation'''
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCAmelCase , output_type='''np''' , )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_lowerCamelCase : int = init_image.resize((1_2_8, 1_2_8) )
_lowerCamelCase : str = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
_lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = '''A fantasy landscape, trending on artstation'''
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCAmelCase , output_type='''np''' , )
_lowerCamelCase : Union[str, Any] = output.images
_lowerCamelCase : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 175
| 0
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__a = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__a = {
'vinai/phobert-base': 2_5_6,
'vinai/phobert-large': 2_5_6,
}
def a ( snake_case__: List[str] ):
'''simple docstring'''
lowercase_ = set()
lowercase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ = char
lowercase_ = set(snake_case__ )
return pairs
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Optional[Any] = VOCAB_FILES_NAMES
a :List[str] = PRETRAINED_VOCAB_FILES_MAP
a :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : List[Any]="<pad>" , SCREAMING_SNAKE_CASE_ : List[Any]="<mask>" , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase_ = vocab_file
lowercase_ = merges_file
lowercase_ = {}
lowercase_ = 0
lowercase_ = 1
lowercase_ = 2
lowercase_ = 3
self.add_from_file(SCREAMING_SNAKE_CASE_ )
lowercase_ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
lowercase_ = merges_handle.read().split('''\n''' )[:-1]
lowercase_ = [tuple(merge.split()[:-1] ) for merge in merges]
lowercase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowercase_ = {}
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ = [self.cls_token_id]
lowercase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowercase ( self : Any ) -> Any:
return len(self.encoder )
def _lowercase ( self : Any ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
if token in self.cache:
return self.cache[token]
lowercase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowercase_ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowercase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ = bigram
lowercase_ = []
lowercase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowercase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowercase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowercase_ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
lowercase_ = word[:-4]
lowercase_ = word
return word
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> int:
lowercase_ = []
lowercase_ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Union[str, Any]:
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Tuple:
lowercase_ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.merges_file , SCREAMING_SNAKE_CASE_ )
return out_vocab_file, out_merge_file
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
lowercase_ = f.readlines()
for lineTmp in lines:
lowercase_ = lineTmp.strip()
lowercase_ = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
lowercase_ = line[:idx]
lowercase_ = len(self.encoder )
| 30
|
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[int] = []
_a : int = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : Any = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Tuple = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : Dict = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : Dict = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 271
| 0
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = ort.SessionOptions()
UpperCamelCase = False
return options
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
UpperCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = 'A red cat sitting on a park bench'
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type='np' , )
UpperCamelCase = output.images
UpperCamelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
UpperCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = 'A red cat sitting on a park bench'
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type='np' , )
UpperCamelCase = output.images
UpperCamelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 110
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=0.6 , A_=None , ) -> str:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = mask_ratio
UpperCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = TFViTMAEModel(config=A_ )
UpperCamelCase = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TFViTMAEForPreTraining(A_ )
UpperCamelCase = model(A_ , training=A_ )
# expected sequence length = num_patches
UpperCamelCase = (self.image_size // self.patch_size) ** 2
UpperCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = TFViTMAEForPreTraining(A_ )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , training=A_ )
UpperCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Union[str, Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowercase : Optional[int] = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__lowercase : Optional[Any] = False
__lowercase : Optional[Any] = False
__lowercase : Dict = False
__lowercase : List[str] = False
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = TFViTMAEModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer ) )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model(A_ , noise=A_ )
UpperCamelCase = copy.deepcopy(self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = model(**A_ , noise=A_ )
UpperCamelCase = outputs_dict[0].numpy()
UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(A_ ):
UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(A_ ):
UpperCamelCase = v.numpy()
else:
UpperCamelCase = np.array(A_ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = prepare_numpy_arrays(A_ )
UpperCamelCase = model(A_ , noise=A_ )
UpperCamelCase = model(**A_ , noise=A_ )
self.assert_outputs_same(A_ , A_ )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.constant(A_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase = tf_noise
super().check_pt_tf_models(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(A_ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(A_ , A_ ),)
if isinstance(A_ , A_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(A_ , '_keras_serializable' , A_ )
}
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.convert_to_tensor(A_ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCamelCase = main_layer_class(A_ )
UpperCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCamelCase = tf.keras.Model(A_ , outputs=main_layer(A_ ) )
UpperCamelCase = model(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = os.path.join(A_ , 'keras_model.h5' )
model.save(A_ )
UpperCamelCase = tf.keras.models.load_model(
A_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(A_ , tf.keras.Model )
UpperCamelCase = model(A_ )
self.assert_outputs_same(A_ , A_ )
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model(A_ , noise=A_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase = outputs.last_hidden_state.numpy()
UpperCamelCase = 0
else:
UpperCamelCase = outputs.logits.numpy()
UpperCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ , saved_model=A_ )
UpperCamelCase = model_class.from_pretrained(A_ )
UpperCamelCase = model(A_ , noise=A_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase = after_outputs['last_hidden_state'].numpy()
UpperCamelCase = 0
else:
UpperCamelCase = after_outputs['logits'].numpy()
UpperCamelCase = 0
UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1e-5 )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model(A_ , noise=A_ )
UpperCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(A_ )
UpperCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCamelCase = model_class.from_config(model.config )
UpperCamelCase = new_model(A_ ) # Build model
new_model.set_weights(model.get_weights() )
UpperCamelCase = new_model(A_ , noise=A_ )
self.assert_outputs_same(A_ , A_ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(A_ )
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCamelCase = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase = ViTMAEConfig()
UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCamelCase = model(**A_ , noise=A_ )
# verify the logits
UpperCamelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , A_ , atol=1e-4 )
| 110
| 1
|
import string
def _a ( lowerCamelCase ):
for key in range(len(string.ascii_uppercase ) ):
lowerCamelCase : Optional[int] = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCamelCase : str = string.ascii_uppercase.find(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = num - key
if num < 0:
lowerCamelCase : List[Any] = num + len(string.ascii_uppercase )
lowerCamelCase : List[Any] = translated + string.ascii_uppercase[num]
else:
lowerCamelCase : Optional[Any] = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def _a ( ):
lowerCamelCase : List[str] = input("""Encrypted message: """ )
lowerCamelCase : str = message.upper()
decrypt(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 287
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 68
| 0
|
'''simple docstring'''
__lowerCamelCase = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355818,
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
A_ = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(UpperCAmelCase__ )}'''
)
raise ValueError(UpperCAmelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 101
| 1
|
'''simple docstring'''
class __magic_name__ :
def __init__( self : int , lowercase_ : list ):
lowercase_ : List[Any] = set_counts
lowercase_ : Optional[int] = max(lowercase_ )
lowercase_ : List[Any] = len(lowercase_ )
lowercase_ : Any = [1] * num_sets
lowercase_ : Tuple = list(range(lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : int , lowercase_ : int ):
lowercase_ : Dict = self.get_parent(lowercase_ )
lowercase_ : Tuple = self.get_parent(lowercase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : Dict = 0
lowercase_ : int = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : List[str] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : Dict = 0
lowercase_ : int = src_parent
lowercase_ : Any = self.set_counts[src_parent]
lowercase_ : int = max(self.max_set , lowercase_ )
return True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : int ):
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : Dict = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 239
|
'''simple docstring'''
import math
import unittest
def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
with self.assertRaises(lowercase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 239
| 1
|
"""simple docstring"""
import numpy as np
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
lowerCAmelCase__ :int = (0, 0)
lowerCAmelCase__ :Optional[Any] = None
lowerCAmelCase__ :Union[str, Any] = 0
lowerCAmelCase__ :int = 0
lowerCAmelCase__ :Union[str, Any] = 0
def __eq__( self , __UpperCAmelCase ):
'''simple docstring'''
return self.position == cell.position
def snake_case ( self ):
'''simple docstring'''
print(self.position )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase=(5, 5) ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = np.zeros(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = world_size[0]
lowerCAmelCase__ :Any = world_size[1]
def snake_case ( self ):
'''simple docstring'''
print(self.w )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
lowerCAmelCase__ :int = cell.position[0]
lowerCAmelCase__ :Optional[int] = cell.position[1]
lowerCAmelCase__ :int = []
for n in neughbour_cord:
lowerCAmelCase__ :Tuple = current_x + n[0]
lowerCAmelCase__ :Dict = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
lowerCAmelCase__ :Dict = Cell()
lowerCAmelCase__ :List[str] = (x, y)
lowerCAmelCase__ :List[str] = cell
neighbours.append(__UpperCAmelCase )
return neighbours
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = []
lowerCAmelCase__ :Any = []
_open.append(_SCREAMING_SNAKE_CASE )
while _open:
lowerCAmelCase__ :List[Any] = np.argmin([n.f for n in _open] )
lowerCAmelCase__ :Tuple = _open[min_f]
_closed.append(_open.pop(_SCREAMING_SNAKE_CASE ) )
if current == goal:
break
for n in world.get_neigbours(_SCREAMING_SNAKE_CASE ):
for c in _closed:
if c == n:
continue
lowerCAmelCase__ :List[str] = current.g + 1
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = n.position
lowerCAmelCase__ , lowerCAmelCase__ :int = goal.position
lowerCAmelCase__ :str = (ya - ya) ** 2 + (xa - xa) ** 2
lowerCAmelCase__ :List[str] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = []
while current.parent is not None:
path.append(current.position )
lowerCAmelCase__ :Any = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__A = Gridworld()
# Start position and goal
__A = Cell()
__A = (0, 0)
__A = Cell()
__A = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
__A = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__A = 1
print(world.w)
| 254
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
lowerCAmelCase__ :Optional[int] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCAmelCase__ :Any = token_dict['token']
lowerCAmelCase__ :int = Tokenizer(Unigram() )
lowerCAmelCase__ :Tuple = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
lowerCAmelCase__ :Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCAmelCase__ :List[str] = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = TemplateProcessing(
single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
lowerCAmelCase__ :Optional[int] = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :int = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :int = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase )
self.add_unk_id()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase )
self.add_unk_id()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = json.loads(self._tokenizer.to_str() )
lowerCAmelCase__ :List[str] = self.special_tokens['unk']['id']
lowerCAmelCase__ :Union[str, Any] = Tokenizer.from_str(json.dumps(__UpperCAmelCase ) )
| 254
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( _snake_case ):
'''simple docstring'''
_A : Any = (UniPCMultistepScheduler,)
_A : Dict = (("""num_inference_steps""", 2_5),)
def lowerCAmelCase_ ( self: int , **snake_case: str ) -> Dict:
snake_case_ :Optional[int] = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**lowercase_ )
return config
def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple=0 , **snake_case: int ) -> Optional[int]:
snake_case_ :Optional[int] = dict(self.forward_default_kwargs )
snake_case_ :str = kwargs.pop("""num_inference_steps""" , lowercase_ )
snake_case_ :Union[str, Any] = self.dummy_sample
snake_case_ :str = 0.1 * sample
snake_case_ :Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case_ :Tuple = self.get_scheduler_config(**lowercase_ )
snake_case_ :Optional[int] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
snake_case_ :str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
snake_case_ :List[str] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
snake_case_ :int = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ :List[str] = sample, sample
for t in range(lowercase_ , time_step + scheduler.config.solver_order + 1 ):
snake_case_ :List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ :Tuple = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self: int , snake_case: Union[str, Any]=0 , **snake_case: str ) -> int:
snake_case_ :int = dict(self.forward_default_kwargs )
snake_case_ :Union[str, Any] = kwargs.pop("""num_inference_steps""" , lowercase_ )
snake_case_ :List[str] = self.dummy_sample
snake_case_ :Any = 0.1 * sample
snake_case_ :List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case_ :List[Any] = self.get_scheduler_config()
snake_case_ :str = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ :List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
snake_case_ :Tuple = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ :Any = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ :int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ :Optional[Any] = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self: Dict , snake_case: Tuple=None , **snake_case: Optional[Any] ) -> int:
if scheduler is None:
snake_case_ :int = self.scheduler_classes[0]
snake_case_ :Optional[Any] = self.get_scheduler_config(**lowercase_ )
snake_case_ :str = scheduler_class(**lowercase_ )
snake_case_ :Any = self.scheduler_classes[0]
snake_case_ :Tuple = self.get_scheduler_config(**lowercase_ )
snake_case_ :Tuple = scheduler_class(**lowercase_ )
snake_case_ :List[Any] = 10
snake_case_ :str = self.dummy_model()
snake_case_ :Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ :int = model(lowercase_ , lowercase_ )
snake_case_ :Any = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case_ :Any = dict(self.forward_default_kwargs )
snake_case_ :Dict = kwargs.pop("""num_inference_steps""" , lowercase_ )
for scheduler_class in self.scheduler_classes:
snake_case_ :Tuple = self.get_scheduler_config()
snake_case_ :Optional[Any] = scheduler_class(**lowercase_ )
snake_case_ :Optional[int] = self.dummy_sample
snake_case_ :List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , """set_timesteps""" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , """set_timesteps""" ):
snake_case_ :str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ :Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
snake_case_ :str = dummy_past_residuals[: scheduler.config.solver_order]
snake_case_ :Dict = scheduler.timesteps[5]
snake_case_ :Optional[Any] = scheduler.timesteps[6]
snake_case_ :Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ :str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case_ :List[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
snake_case_ :Optional[Any] = self.full_loop(scheduler=lowercase_ )
snake_case_ :int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
snake_case_ :Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case_ :Any = DEISMultistepScheduler.from_config(scheduler.config )
snake_case_ :int = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case_ :Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case_ :Optional[int] = self.full_loop(scheduler=lowercase_ )
snake_case_ :Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
self.check_over_configs(thresholding=lowercase_ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , solver_order=lowercase_ , solver_type=lowercase_ , )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , )
snake_case_ :Optional[int] = self.full_loop(
solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , )
assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers"
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
self.check_over_configs(lower_order_final=lowercase_ )
self.check_over_configs(lower_order_final=lowercase_ )
def lowerCAmelCase_ ( self: Tuple ) -> Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowercase_ , time_step=0 )
def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
snake_case_ :str = self.full_loop()
snake_case_ :Any = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def lowerCAmelCase_ ( self: Dict ) -> int:
snake_case_ :Optional[Any] = self.full_loop(prediction_type="""v_prediction""" )
snake_case_ :Optional[int] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_ :Optional[Any] = self.scheduler_classes[0]
snake_case_ :List[Any] = self.get_scheduler_config(thresholding=lowercase_ , dynamic_thresholding_ratio=0 )
snake_case_ :Optional[Any] = scheduler_class(**lowercase_ )
snake_case_ :int = 10
snake_case_ :Optional[int] = self.dummy_model()
snake_case_ :Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ :Union[str, Any] = model(lowercase_ , lowercase_ )
snake_case_ :Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
assert sample.dtype == torch.floataa
def lowerCAmelCase_ ( self: int , **snake_case: Any ) -> Any:
for scheduler_class in self.scheduler_classes:
snake_case_ :str = self.get_scheduler_config(**lowercase_ )
snake_case_ :Any = scheduler_class(**lowercase_ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 66
|
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Optional[Union[str, Path]] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : Optional[Dict] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = True
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Optional[Union[str, bool]] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : Optional[Dict] = None
UpperCAmelCase_ : Optional[str] = None
def UpperCAmelCase_ ( self : Tuple ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(lowercase_ ) for k, v in self.__dict__.items()} )
| 151
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowercase : List[Any] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowercase : List[str] = {
"""allenai/longformer-base-4096""": 4_0_9_6,
"""allenai/longformer-large-4096""": 4_0_9_6,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_0_9_6,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_0_9_6,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ) -> Dict:
a__ : int = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
a__ : Dict = bs[:]
a__ : str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
a__ : int = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def A_ ( A__ ) -> Union[str, Any]:
a__ : Any = set()
a__ : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a__ : Optional[Any] = char
return pairs
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : List[Any] = VOCAB_FILES_NAMES
__A : Tuple = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase , lowercase , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else bos_token
a__ : int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else eos_token
a__ : int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else sep_token
a__ : Tuple = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else cls_token
a__ : List[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else unk_token
a__ : List[str] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else mask_token
super().__init__(
errors=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , **lowercase , )
with open(lowercase , encoding='utf-8') as vocab_handle:
a__ : int = json.load(lowercase)
a__ : List[str] = {v: k for k, v in self.encoder.items()}
a__ : List[Any] = errors # how to handle errors in decoding
a__ : Tuple = bytes_to_unicode()
a__ : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase , encoding='utf-8') as merges_handle:
a__ : List[str] = merges_handle.read().split('\n')[1:-1]
a__ : List[str] = [tuple(merge.split()) for merge in bpe_merges]
a__ : Optional[int] = dict(zip(lowercase , range(len(lowercase))))
a__ : List[str] = {}
a__ : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ : Optional[int] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return len(self.encoder)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def __lowercase ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
a__ : Any = tuple(lowercase)
a__ : Union[str, Any] = get_pairs(lowercase)
if not pairs:
return token
while True:
a__ : Union[str, Any] = min(lowercase , key=lambda lowercase: self.bpe_ranks.get(lowercase , float('inf')))
if bigram not in self.bpe_ranks:
break
a__ , a__ : str = bigram
a__ : Union[str, Any] = []
a__ : List[str] = 0
while i < len(lowercase):
try:
a__ : Optional[int] = word.index(lowercase , lowercase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
a__ : Dict = j
if word[i] == first and i < len(lowercase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
a__ : Dict = tuple(lowercase)
a__ : List[str] = new_word
if len(lowercase) == 1:
break
else:
a__ : int = get_pairs(lowercase)
a__ : Union[str, Any] = ' '.join(lowercase)
a__ : Optional[int] = word
return word
def __lowercase ( self , lowercase) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] = []
for token in re.findall(self.pat , lowercase):
a__ : Union[str, Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase).split(' '))
return bpe_tokens
def __lowercase ( self , lowercase) -> List[Any]:
'''simple docstring'''
return self.encoder.get(lowercase , self.encoder.get(self.unk_token))
def __lowercase ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowercase)
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
a__ : int = ''.join(lowercase)
a__ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
a__ : str = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
a__ : Optional[Any] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowercase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase , ensure_ascii=lowercase) + '\n')
a__ : str = 0
with open(lowercase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
a__ : Dict = token_index
writer.write(' '.join(lowercase) + '\n')
index += 1
return vocab_file, merge_file
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ : Dict = [self.cls_token_id]
a__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self , lowercase , lowercase = None , lowercase = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase)
if token_ids_a is None:
return [1] + ([0] * len(lowercase)) + [1]
return [1] + ([0] * len(lowercase)) + [1, 1] + ([0] * len(lowercase)) + [1]
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : Dict = [self.sep_token_id]
a__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __lowercase ( self , lowercase , lowercase=False , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowercase) > 0 and not text[0].isspace()):
a__ : List[str] = ' ' + text
return (text, kwargs)
| 225
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def A_ ( A__ ) -> Optional[int]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(A__ , '_dynamo' ):
return False
return isinstance(A__ , torch._dynamo.eval_frame.OptimizedModule )
def A_ ( A__ , A__ = True ) -> int:
a__ : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
a__ : Union[str, Any] = is_compiled_module(A__ )
if is_compiled:
a__ : List[str] = model
a__ : Dict = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A__ , A__ ):
a__ : str = model.module
if not keep_fpaa_wrapper:
a__ : Union[str, Any] = getattr(A__ , 'forward' )
a__ : List[Any] = model.__dict__.pop('_original_forward' , A__ )
if original_forward is not None:
while hasattr(A__ , '__wrapped__' ):
a__ : int = forward.__wrapped__
if forward == original_forward:
break
a__ : List[Any] = forward
if getattr(A__ , '_converted_to_transformer_engine' , A__ ):
convert_model(A__ , to_transformer_engine=A__ )
if is_compiled:
a__ : List[str] = model
a__ : Any = compiled_model
return model
def A_ ( ) -> int:
PartialState().wait_for_everyone()
def A_ ( A__ , A__ ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A__ , A__ )
elif PartialState().local_process_index == 0:
torch.save(A__ , A__ )
@contextmanager
def A_ ( **A__ ) -> Any:
for key, value in kwargs.items():
a__ : Optional[int] = str(A__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def A_ ( A__ ) -> List[str]:
if not hasattr(A__ , '__qualname__' ) and not hasattr(A__ , '__name__' ):
a__ : Dict = getattr(A__ , '__class__' , A__ )
if hasattr(A__ , '__qualname__' ):
return obj.__qualname__
if hasattr(A__ , '__name__' ):
return obj.__name__
return str(A__ )
def A_ ( A__ , A__ ) -> Dict:
for key, value in source.items():
if isinstance(A__ , A__ ):
a__ : Optional[Any] = destination.setdefault(A__ , {} )
merge_dicts(A__ , A__ )
else:
a__ : Optional[int] = value
return destination
def A_ ( A__ = None ) -> bool:
if port is None:
a__ : List[Any] = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 225
| 1
|
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if len(lowerCamelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21
|
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not numbers:
return 0
if not isinstance(lowerCamelCase_ , (list, tuple) ) or not all(
isinstance(lowerCamelCase_ , lowerCamelCase_ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
_lowercase : int = numbers[0]
for i in range(1 , len(lowerCamelCase_ ) ):
# update the maximum and minimum subarray products
_lowercase : Union[str, Any] = numbers[i]
if number < 0:
_lowercase , _lowercase : Any = min_till_now, max_till_now
_lowercase : Union[str, Any] = max(lowerCamelCase_ , max_till_now * number )
_lowercase : Union[str, Any] = min(lowerCamelCase_ , min_till_now * number )
# update the maximum product found till now
_lowercase : Optional[Any] = max(lowerCamelCase_ , lowerCamelCase_ )
return max_prod
| 21
| 1
|
from collections import defaultdict
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict ) -> Optional[int]:
__lowerCAmelCase = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__lowerCAmelCase = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCAmelCase_ ) )
]
__lowerCAmelCase = defaultdict(lowerCAmelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__lowerCAmelCase = (1 << len(lowerCAmelCase_ )) - 1
def lowercase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
__lowerCAmelCase = self.count_ways_until(lowerCAmelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
__lowerCAmelCase = total_ways_util
return self.dp[mask][task_no]
def lowercase ( self : int , lowerCAmelCase_ : Optional[int] ) -> Any:
# Store the list of persons for each task
for i in range(len(lowerCAmelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(lowerCAmelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
_snake_case : int = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_snake_case : Optional[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 207
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case : Dict = 0
_snake_case : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case : Tuple = tuple[int, int]
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None , ) -> None:
__lowerCAmelCase = pos_x
__lowerCAmelCase = pos_y
__lowerCAmelCase = (pos_y, pos_x)
__lowerCAmelCase = goal_x
__lowerCAmelCase = goal_y
__lowerCAmelCase = g_cost
__lowerCAmelCase = parent
__lowerCAmelCase = self.calculate_heuristic()
__lowerCAmelCase = self.g_cost + self.h_cost
def lowercase ( self : Any ) -> float:
__lowerCAmelCase = self.pos_x - self.goal_x
__lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase_ ) + abs(lowerCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : Node ) -> bool:
return self.f_cost < other.f_cost
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> Tuple:
__lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ )
__lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , lowerCAmelCase_ )
__lowerCAmelCase = [self.start]
__lowerCAmelCase = []
__lowerCAmelCase = False
def lowercase ( self : str ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
return [self.start.pos]
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Node ) -> list[Node]:
__lowerCAmelCase = []
for action in delta:
__lowerCAmelCase = parent.pos_x + action[1]
__lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def lowercase ( self : Tuple , lowerCAmelCase_ : Node | None ) -> list[TPosition]:
__lowerCAmelCase = node
__lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase = current_node.parent
path.reverse()
return path
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> None:
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = False
def lowercase ( self : Dict ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
__lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase_ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = current_bwd_node
__lowerCAmelCase = current_fwd_node
__lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase_ )
else:
astar.open_nodes.append(lowerCAmelCase_ )
return [self.fwd_astar.start.pos]
def lowercase ( self : Dict , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> list[TPosition]:
__lowerCAmelCase = self.fwd_astar.retrace_path(lowerCAmelCase_ )
__lowerCAmelCase = self.bwd_astar.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
__lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case : List[Any] = (0, 0)
_snake_case : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case : int = time.time()
_snake_case : Optional[int] = AStar(init, goal)
_snake_case : int = a_star.search()
_snake_case : Union[str, Any] = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
_snake_case : Any = time.time()
_snake_case : Dict = BidirectionalAStar(init, goal)
_snake_case : Optional[int] = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 207
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict ):
"""simple docstring"""
with open(lowerCAmelCase ) as metadata_file:
__magic_name__ : Optional[Any] = json.load(lowerCAmelCase )
__magic_name__ : Any = LukeConfig(use_entity_aware_attention=lowerCAmelCase , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__magic_name__ : List[Any] = torch.load(lowerCAmelCase , map_location='cpu' )['module']
# Load the entity vocab file
__magic_name__ : Tuple = load_original_entity_vocab(lowerCAmelCase )
# add an entry for [MASK2]
__magic_name__ : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__magic_name__ : str = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__magic_name__ : Any = AddedToken('<ent>' , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
__magic_name__ : List[str] = AddedToken('<ent2>' , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) , 'r' ) as f:
__magic_name__ : Dict = json.load(lowerCAmelCase )
__magic_name__ : Dict = 'MLukeTokenizer'
with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase )
# Initialize the embeddings of the special tokens
__magic_name__ : str = tokenizer.convert_tokens_to_ids(['@'] )[0]
__magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(['#'] )[0]
__magic_name__ : str = state_dict['embeddings.word_embeddings.weight']
__magic_name__ : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
__magic_name__ : int = word_emb[enta_init_index].unsqueeze(0 )
__magic_name__ : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__magic_name__ : int = state_dict[bias_name]
__magic_name__ : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__magic_name__ : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
__magic_name__ : Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__magic_name__ : List[str] = f'encoder.layer.{layer_index}.attention.self.'
__magic_name__ : Optional[int] = state_dict[prefix + matrix_name]
__magic_name__ : Union[str, Any] = state_dict[prefix + matrix_name]
__magic_name__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__magic_name__ : int = state_dict['entity_embeddings.entity_embeddings.weight']
__magic_name__ : List[Any] = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__magic_name__ : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__magic_name__ : int = state_dict['entity_predictions.bias']
__magic_name__ : int = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__magic_name__ : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__magic_name__ : Union[str, Any] = LukeForMaskedLM(config=lowerCAmelCase ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
__magic_name__ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
__magic_name__ : Tuple = state_dict[key]
else:
__magic_name__ : int = state_dict[key]
__magic_name__ , __magic_name__ : Optional[int] = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
if set(lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowerCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__magic_name__ : Union[str, Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase , task='entity_classification' )
__magic_name__ : Optional[int] = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__magic_name__ : Dict = (0, 9)
__magic_name__ : Union[str, Any] = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors='pt' )
__magic_name__ : Optional[int] = model(**lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ : Optional[int] = torch.Size((1, 33, 768) )
__magic_name__ : Dict = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ : Tuple = torch.Size((1, 1, 768) )
__magic_name__ : str = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__magic_name__ : Dict = MLukeTokenizer.from_pretrained(lowerCAmelCase )
__magic_name__ : List[str] = 'Tokyo is the capital of <mask>.'
__magic_name__ : Union[str, Any] = (24, 30)
__magic_name__ : Optional[int] = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors='pt' )
__magic_name__ : Dict = model(**lowerCAmelCase )
__magic_name__ : List[Any] = encoding['input_ids'][0].tolist()
__magic_name__ : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
__magic_name__ : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase )
__magic_name__ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__magic_name__ : List[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCAmelCase ) )
model.save_pretrained(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : List[str] = ['[MASK]', '[PAD]', '[UNK]']
__magic_name__ : Any = [json.loads(lowerCAmelCase ) for line in open(lowerCAmelCase )]
__magic_name__ : str = {}
for entry in data:
__magic_name__ : int = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__magic_name__ : str = entity_id
break
__magic_name__ : Union[str, Any] = f'{language}:{entity_name}'
__magic_name__ : Optional[int] = entity_id
return new_mapping
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
lowerCAmelCase :List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 331
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case__ ( snake_case_, snake_case_ ):
@register_to_config
def __init__( self , lowerCamelCase = 768 , ):
super().__init__()
__a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) )
__a = nn.Parameter(torch.ones(1 , lowerCamelCase ) )
def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ):
__a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) )
__a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) )
return self
def a__ ( self , lowerCamelCase ):
__a = (embeds - self.mean) * 1.0 / self.std
return embeds
def a__ ( self , lowerCamelCase ):
__a = (embeds * self.std) + self.mean
return embeds
| 261
| 0
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0 , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : str = seq_length
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Optional[int] = use_input_lengths
UpperCAmelCase : List[Any] = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[Any] = gelu_activation
UpperCAmelCase : Union[str, Any] = sinusoidal_embeddings
UpperCAmelCase : List[Any] = causal
UpperCAmelCase : List[Any] = asm
UpperCAmelCase : Union[str, Any] = n_langs
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Dict = n_special
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Tuple = num_attention_heads
UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Any = num_choices
UpperCAmelCase : Any = summary_type
UpperCAmelCase : Union[str, Any] = use_proj
UpperCAmelCase : int = scope
UpperCAmelCase : Optional[Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : str = None
if self.use_input_lengths:
UpperCAmelCase : str = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase : int = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Any = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = XLMModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Dict = XLMWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
UpperCAmelCase : List[Any] = XLMForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Tuple = XLMForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((UpperCAmelCase) , ) : Optional[Any] = result_with_labels.to_tuple()
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , ) : Dict = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : str = XLMForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : Any = XLMForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.num_choices
UpperCAmelCase : Tuple = XLMForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Any = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Any = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : Optional[int] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__lowerCAmelCase : Dict = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 ) -> int:
'''simple docstring'''
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
[isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for iter_attentions in attentions] , [True] * len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_SCREAMING_SNAKE_CASE ):
# adds PAD dummy token
UpperCAmelCase : Optional[int] = min_length + idx + 1
UpperCAmelCase : Union[str, Any] = min_length + idx + 1
UpperCAmelCase : str = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 ) -> Union[str, Any]:
'''simple docstring'''
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
[isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for iter_hidden_states in hidden_states] , [True] * len(_SCREAMING_SNAKE_CASE ) , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_SCREAMING_SNAKE_CASE ):
# adds PAD dummy token
UpperCAmelCase : int = min_length + idx + 1
UpperCAmelCase : Optional[int] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_SCREAMING_SNAKE_CASE ) , )
pass
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = XLMModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Any = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = torch.tensor([[14, 447]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # the president
UpperCAmelCase : Any = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase : Optional[Any] = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _SCREAMING_SNAKE_CASE )
| 76
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
A: Optional[int] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A: Optional[int] = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
A: int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[int] = strip_accents
UpperCAmelCase : List[str] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = do_lower_case
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 76
| 1
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ : List[Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Any = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCAmelCase_ : Optional[int] = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'''emoji''': True,
},
}
]
UpperCAmelCase_ : List[str] = 0
for log in Path().glob('''*.log'''):
UpperCAmelCase_ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCAmelCase_ : Optional[int] = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCAmelCase_ : List[str] = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCAmelCase_ : Optional[int] = F'''{line["duration"]:.4f}'''
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ : Optional[Any] = []
log.unlink()
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Dict = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Tuple = {}
for test in failed_tests:
UpperCAmelCase_ : Optional[Any] = test[0].split('''::''')
UpperCAmelCase_ : str = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ : str = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ : List[Any] = [test[0] for test in failed_table]
UpperCAmelCase_ : Tuple = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ : Tuple = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ : Dict = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCAmelCase_ : str = '''Too many failed tests, please see the full report in the Action results.'''
UpperCAmelCase_ : str = len(err) + 10
UpperCAmelCase_ : Optional[Any] = message[: 30_00 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
UpperCAmelCase_ : Any = '''No failed tests! 🤗'''
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCAmelCase_ : List[str] = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCAmelCase_ : Tuple = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCAmelCase_ : Dict = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
UpperCAmelCase_ : str = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
UpperCAmelCase_ : Any = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCAmelCase_ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ : Optional[Any] = row[0]
else:
UpperCAmelCase_ : List[Any] = ''''''
UpperCAmelCase_ : Tuple = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 38
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 287
| 0
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''https://openaipublic.azureedge.net/jukebox/models/'''
lowerCAmelCase__ = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 1_0:
lowerCAmelCase : Union[str, Any] = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 1_0:
lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 1_0:
lowerCAmelCase : Tuple = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 1_0:
lowerCAmelCase : Union[str, Any] = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
lowerCAmelCase : int = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
lowerCAmelCase : Dict = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCAmelCase : Any = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
lowerCAmelCase : List[str] = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : int = {}
import re
lowerCAmelCase : Any = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowerCAmelCase : Optional[Any] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowerCAmelCase : Union[str, Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowerCAmelCase : List[str] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
lowerCAmelCase : List[str] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowerCAmelCase : Union[str, Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = re_encoder_block_conv_in.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = regex_match.groups()
lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase : List[Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
lowerCAmelCase : Optional[Any] = re_encoder_block_conv_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Union[str, Any] = re_encoder_block_resnet.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = regex_match.groups()
lowerCAmelCase : int = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase : List[Any] = {"1": 1, "3": 2}[groups[-2]]
lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
lowerCAmelCase : Union[str, Any] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowerCAmelCase : Tuple = prefix + resnet_block
lowerCAmelCase : Tuple = re_encoder_block_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = re_encoder_block_proj_out.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = regex_match.groups()
lowerCAmelCase : List[Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
lowerCAmelCase : int = re_encoder_block_proj_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Dict = re_decoder_block_conv_out.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = regex_match.groups()
lowerCAmelCase : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase : List[Any] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = re_decoder_block_resnet.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = regex_match.groups()
lowerCAmelCase : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase : List[Any] = {"1": 1, "3": 2}[groups[-2]]
lowerCAmelCase : Optional[Any] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
lowerCAmelCase : Dict = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowerCAmelCase : List[Any] = prefix + resnet_block
lowerCAmelCase : Optional[int] = re_decoder_block_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = re_decoder_block_proj_in.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = regex_match.groups()
lowerCAmelCase : List[str] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = re_prior_cond_conv_out.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = regex_match.groups()
lowerCAmelCase : List[str] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase : Union[str, Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
lowerCAmelCase : str = re_prior_cond_conv_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Union[str, Any] = re_prior_cond_resnet.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = regex_match.groups()
lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase : int = {"1": 1, "3": 2}[groups[-2]]
lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
lowerCAmelCase : Tuple = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowerCAmelCase : Optional[int] = prefix + resnet_block
lowerCAmelCase : Tuple = re_prior_cond_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = re_prior_cond_proj_in.match(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = regex_match.groups()
lowerCAmelCase : str = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
lowerCAmelCase : int = re_prior_cond_proj_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# keep original key
else:
lowerCAmelCase : Union[str, Any] = original_key
lowerCAmelCase : Any = replace_key(SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
lowerCAmelCase : Dict = original_key
lowerCAmelCase : Union[str, Any] = original_key
lowerCAmelCase : Any = value
return new_dict
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
lowerCAmelCase : Any = requests.get(f"""{PREFIX}{file}""" , allow_redirects=SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , "wb" ).write(r.content )
lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = JukeboxModel(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = []
lowerCAmelCase : Tuple = {}
for i, dict_name in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
lowerCAmelCase : List[str] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
lowerCAmelCase : Tuple = old_dic[k]
elif k.endswith(".w" ):
lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCAmelCase : int = old_dic[k]
else:
lowerCAmelCase : List[Any] = old_dic[k]
lowerCAmelCase : Tuple = "vqvae" if i == 0 else f"""priors.{3 - i}"""
lowerCAmelCase : Any = fix_jukebox_keys(SCREAMING_SNAKE_CASE , model.state_dict() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
weight_dict.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = weight_dict.pop(0 )
model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 133
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def a__ ( SCREAMING_SNAKE_CASE : str ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def a__ ( ):
'''simple docstring'''
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCAmelCase : List[str] = [1, 2, 3]
with pytest.raises(SCREAMING_SNAKE_CASE ):
with parallel_backend("unsupported backend" ):
map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=2 )
with pytest.raises(SCREAMING_SNAKE_CASE ):
with parallel_backend("unsupported backend" ):
map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Tuple = [1, 2]
lowerCAmelCase : int = {"a": 1, "b": 2}
lowerCAmelCase : List[str] = {"a": [1, 2], "b": [3, 4]}
lowerCAmelCase : Dict = {"a": {"1": 1}, "b": 2}
lowerCAmelCase : Tuple = {"a": 1, "b": 2, "c": 3, "d": 4}
lowerCAmelCase : Any = [2, 3]
lowerCAmelCase : Any = {"a": 2, "b": 3}
lowerCAmelCase : Optional[int] = {"a": [2, 3], "b": [4, 5]}
lowerCAmelCase : Optional[int] = {"a": {"1": 2}, "b": 3}
lowerCAmelCase : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
| 133
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ : List[str] =logging.get_logger(__name__)
lowerCAmelCase__ : str ={'''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Dict ={
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Any = VOCAB_FILES_NAMES
UpperCamelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : List[Any] = None
def __init__( self , _A=None , _A=None , _A=None , _A="<unk>" , _A="<s>" , _A="</s>" , _A="<pad>" , _A=False , _A=False , **_A , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , add_prefix_space=_A , clean_up_tokenization_spaces=_A , **_A , )
__SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _A ) != add_prefix_space:
__SCREAMING_SNAKE_CASE = getattr(_A , pre_tok_state.pop('type' ) )
__SCREAMING_SNAKE_CASE = add_prefix_space
__SCREAMING_SNAKE_CASE = pre_tok_class(**_A )
__SCREAMING_SNAKE_CASE = add_prefix_space
def _A ( self , *_A , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , _A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._batch_encode_plus(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , _A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._encode_plus(*_A , **_A )
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
__SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 257
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ : List[Any] =logging.get_logger(__name__)
def __lowercase ( a__ , a__ , a__ , a__ ) -> Tuple[int, int]:
def constraint_to_multiple_of(a__ , a__ , a__=0 , a__=None ):
__SCREAMING_SNAKE_CASE = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__SCREAMING_SNAKE_CASE = math.floor(val / multiple ) * multiple
if x < min_val:
__SCREAMING_SNAKE_CASE = math.ceil(val / multiple ) * multiple
return x
__SCREAMING_SNAKE_CASE = (output_size, output_size) if isinstance(a__ , a__ ) else output_size
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_image_size(a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output_size
# determine new height and width
__SCREAMING_SNAKE_CASE = output_height / input_height
__SCREAMING_SNAKE_CASE = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__SCREAMING_SNAKE_CASE = scale_width
else:
# fit height
__SCREAMING_SNAKE_CASE = scale_height
__SCREAMING_SNAKE_CASE = constraint_to_multiple_of(scale_height * input_height , multiple=a__ )
__SCREAMING_SNAKE_CASE = constraint_to_multiple_of(scale_width * input_width , multiple=a__ )
return (new_height, new_width)
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : List[str] = ['''pixel_values''']
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = False , _A = 1 , _A = True , _A = 1 / 255 , _A = True , _A = None , _A = None , **_A , ):
'''simple docstring'''
super().__init__(**_A )
__SCREAMING_SNAKE_CASE = size if size is not None else {'height': 384, 'width': 384}
__SCREAMING_SNAKE_CASE = get_size_dict(_A )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = keep_aspect_ratio
__SCREAMING_SNAKE_CASE = ensure_multiple_of
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self , _A , _A , _A = False , _A = 1 , _A = PILImageResampling.BICUBIC , _A = None , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(
_A , output_size=(size['height'], size['width']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def _A ( self , _A , _A , _A = None , **_A , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def _A ( self , _A , _A , _A , _A = None , **_A , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def _A ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(_A )
__SCREAMING_SNAKE_CASE = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__SCREAMING_SNAKE_CASE = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(_A ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_A , _A ) for image in images]
__SCREAMING_SNAKE_CASE = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_A ):
__SCREAMING_SNAKE_CASE = target_sizes.numpy()
__SCREAMING_SNAKE_CASE = []
for idx in range(len(_A ) ):
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_A )
__SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
__SCREAMING_SNAKE_CASE = logits.argmax(dim=1 )
__SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 257
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : str=None , lowerCamelCase : Dict=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : Dict=None , lowerCamelCase : Optional[int]=None , ):
if attention_mask is None:
UpperCamelCase_ : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase_ : Dict = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase_ : Optional[Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
UpperCamelCase_ : int = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
UpperCamelCase_ : Tuple = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _lowercase :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : str=1_3 , snake_case : Optional[int]=7 , snake_case : int=True , snake_case : str=False , snake_case : str=9_9 , snake_case : int=1_6 , snake_case : str=2 , snake_case : Dict=4 , snake_case : Tuple=4 , snake_case : List[Any]="relu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : List[str]=0.0 , snake_case : int=0.0 , snake_case : Any=2_0 , snake_case : Union[str, Any]=2 , snake_case : Tuple=1 , snake_case : Optional[int]=0 , ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = parent
UpperCamelCase_ : Optional[Any] = batch_size
UpperCamelCase_ : Tuple = seq_length
UpperCamelCase_ : Dict = is_training
UpperCamelCase_ : Tuple = use_labels
UpperCamelCase_ : Tuple = vocab_size
UpperCamelCase_ : List[str] = hidden_size
UpperCamelCase_ : List[str] = num_hidden_layers
UpperCamelCase_ : Tuple = num_attention_heads
UpperCamelCase_ : Dict = intermediate_size
UpperCamelCase_ : Dict = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : str = attention_probs_dropout_prob
UpperCamelCase_ : List[Any] = encoder_layerdrop
UpperCamelCase_ : Any = decoder_layerdrop
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Dict = eos_token_id
UpperCamelCase_ : int = pad_token_id
UpperCamelCase_ : str = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Any = self.eos_token_id # Eos Token
UpperCamelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase_ : str = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ : str = self.get_config()
UpperCamelCase_ : Any = prepare_mam_aaa_inputs_dict(snake_case , snake_case , snake_case )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : str = MaMaaaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
UpperCamelCase_ : str = inputs_dict['input_ids']
UpperCamelCase_ : Any = inputs_dict['attention_mask']
UpperCamelCase_ : Optional[int] = inputs_dict['head_mask']
# first forward pass
UpperCamelCase_ : int = model(snake_case , attention_mask=snake_case , head_mask=snake_case , use_cache=snake_case )
UpperCamelCase_, UpperCamelCase_ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCamelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase_ : Union[str, Any] = model(snake_case , attention_mask=snake_case )['last_hidden_state']
UpperCamelCase_ : int = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
'last_hidden_state'
]
# select random slice
UpperCamelCase_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-2 ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : int , snake_case : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Tuple = MaMaaaModel(config=snake_case ).to(snake_case ).eval()
UpperCamelCase_ : List[str] = model(**snake_case )
UpperCamelCase_ : List[Any] = outputs.encoder_last_hidden_state
UpperCamelCase_ : Optional[int] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ : Optional[int] = model.get_encoder()
encoder.save_pretrained(snake_case )
UpperCamelCase_ : Tuple = MaMaaaEncoder.from_pretrained(snake_case ).to(snake_case )
UpperCamelCase_ : Optional[Any] = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ : int = model.get_decoder()
decoder.save_pretrained(snake_case )
UpperCamelCase_ : int = MaMaaaDecoder.from_pretrained(snake_case ).to(snake_case )
UpperCamelCase_ : int = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=snake_case , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowercase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowercase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowercase = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = True
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : str , snake_case : str , snake_case : Dict ) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Tuple = MaMaaaModelTester(self )
UpperCamelCase_ : Optional[Any] = ConfigTester(self , config_class=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase_ : int = model_class(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_, UpperCamelCase_ : str = model_class.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertEqual(info['missing_keys'] , [] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase_ : Optional[Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : List[Any] = copy.deepcopy(self._prepare_for_class(snake_case , snake_case ) )
if not self.is_encoder_decoder:
UpperCamelCase_ : List[Any] = inputs['input_ids']
del inputs["input_ids"]
else:
UpperCamelCase_ : str = inputs['input_ids']
UpperCamelCase_ : List[str] = inputs.get('decoder_input_ids' , snake_case )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , snake_case )
UpperCamelCase_ : List[str] = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase_ : Tuple = wte(snake_case )
else:
UpperCamelCase_ : Optional[int] = wte(snake_case )
UpperCamelCase_ : Optional[int] = wte(snake_case )
with torch.no_grad():
model(**snake_case )[0]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ : str = input_dict['input_ids']
UpperCamelCase_ : int = input_ids.ne(1 ).to(snake_case )
UpperCamelCase_ : Dict = MaMaaaForConditionalGeneration(snake_case ).eval().to(snake_case )
if torch_device == "cuda":
model.half()
model.generate(snake_case , attention_mask=snake_case )
model.generate(num_beams=4 , do_sample=snake_case , early_stopping=snake_case , num_return_sequences=3 )
def __lowercase ( lowerCamelCase : List[Any] ):
return torch.tensor(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(snake_case )
UpperCamelCase_ : str = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
UpperCamelCase_ : Dict = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
UpperCamelCase_ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , snake_case , snake_case )
with torch.no_grad():
UpperCamelCase_ : Any = model(**snake_case )[0]
UpperCamelCase_ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , snake_case )
# change to expected output here
UpperCamelCase_ : Dict = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(snake_case )
# change to intended input
UpperCamelCase_ : Tuple = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
UpperCamelCase_ : Union[str, Any] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
UpperCamelCase_ : Dict = prepare_mam_aaa_inputs_dict(model.config , snake_case , snake_case )
with torch.no_grad():
UpperCamelCase_ : Dict = model(**snake_case )[0]
UpperCamelCase_ : Union[str, Any] = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , snake_case )
# change to expected output here
UpperCamelCase_ : Any = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(snake_case )
UpperCamelCase_ : Optional[int] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
UpperCamelCase_ : Union[str, Any] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase_ : Optional[Any] = tokenizer(snake_case , padding=snake_case , return_tensors='pt' )
UpperCamelCase_ : Dict = model.generate(
input_ids=dct['input_ids'].to(snake_case ) , attention_mask=dct['attention_mask'].to(snake_case ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
UpperCamelCase_ : Optional[int] = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
UpperCamelCase_ : List[str] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=snake_case , skip_special_tokens=snake_case )
assert generated == expected_en
| 50
| 1
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
A__ : Dict = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
A__ : List[str] = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def UpperCAmelCase__ ( ) -> int:
__lowerCamelCase : int = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
__lowerCamelCase : Dict = bs[:]
__lowerCamelCase : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
__lowerCamelCase : int = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = set()
__lowerCamelCase : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase : int = char
return pairs
class UpperCAmelCase_ (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Tuple = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
__lowerCamelCase : Optional[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
__lowerCamelCase : int = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
__lowerCamelCase : Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
__lowerCamelCase : Tuple = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
__lowerCamelCase : Optional[int] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
__lowerCamelCase : Any = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : Optional[int] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowerCamelCase : Union[str, Any] = json.load(_lowerCAmelCase )
__lowerCamelCase : Dict = {v: k for k, v in self.encoder.items()}
__lowerCamelCase : Any = errors # how to handle errors in decoding
__lowerCamelCase : Optional[int] = bytes_to_unicode()
__lowerCamelCase : int = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding='utf-8' ) as merges_handle:
__lowerCamelCase : Union[str, Any] = merges_handle.read().split('\n' )[1:-1]
__lowerCamelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCamelCase : int = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCamelCase : List[Any] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowercase_ ( self ) -> Optional[int]:
return len(self.encoder )
def lowercase_ ( self ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str:
if token in self.cache:
return self.cache[token]
__lowerCamelCase : List[Any] = tuple(_lowerCAmelCase )
__lowerCamelCase : List[Any] = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
__lowerCamelCase : Optional[Any] = min(_lowerCAmelCase , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(_lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase , __lowerCamelCase : Any = bigram
__lowerCamelCase : int = []
__lowerCamelCase : Optional[int] = 0
while i < len(_lowerCAmelCase ):
try:
__lowerCamelCase : Union[str, Any] = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCamelCase : List[Any] = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase : Dict = tuple(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
__lowerCamelCase : Tuple = get_pairs(_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = ' '.join(_lowerCAmelCase )
__lowerCamelCase : Tuple = word
return word
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = []
for token in re.findall(self.pat , _lowerCAmelCase ):
__lowerCamelCase : List[str] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(' ' ) )
return bpe_tokens
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return self.decoder.get(_lowerCAmelCase )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Any = ''.join(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Any:
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase : Union[str, Any] = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : Any = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '\n' )
__lowerCamelCase : Union[str, Any] = 0
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
__lowerCamelCase : List[str] = token_index
writer.write(' '.join(_lowerCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Optional[Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : Any = [self.cls_token_id]
__lowerCamelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> Any:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> int:
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : List[str] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
__lowerCamelCase : Any = ' ' + text
return (text, kwargs)
| 185
|
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return number | (1 << position)
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return number & ~(1 << position)
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return number ^ (1 << position)
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> bool:
return ((number >> position) & 1) == 1
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__A =logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = train_dataset.features["label"].names
if training_args.do_eval:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = eval_dataset.features["label"].names
if training_args.do_predict:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = predict_dataset.features["label"].names
# Labels
lowerCamelCase_ = len(lowerCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , idalabel={str(lowerCamelCase__ ): label for i, label in enumerate(lowerCamelCase__ )} , labelaid={label: i for i, label in enumerate(lowerCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
def preprocess_function(lowerCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=lowerCamelCase__ , max_length=data_args.max_seq_length , truncation=lowerCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
lowerCamelCase_ = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowerCamelCase_ = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
lowerCamelCase_ = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowerCamelCase_ = eval_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_predict_samples )
lowerCamelCase_ = predict_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowerCamelCase_ = predict_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
lowerCamelCase_ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
return metric.compute(predictions=lowerCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowerCamelCase__ )
trainer.save_metrics("train" , lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCamelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("eval" , lowerCamelCase__ )
trainer.save_metrics("eval" , lowerCamelCase__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = trainer.predict(lowerCamelCase__ , metric_key_prefix="predict" )
lowerCamelCase_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase__ )
)
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("predict" , lowerCamelCase__ )
trainer.save_metrics("predict" , lowerCamelCase__ )
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(lowerCamelCase__ , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(lowerCamelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
| 47
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
lowerCamelCase_ = OmegaConf.load(lowerCamelCase__ )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase__ ) ) )
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
if conf_path is None:
lowerCamelCase_ = "./model_checkpoints/vqgan_only.yaml"
lowerCamelCase_ = load_config(lowerCamelCase__ , display=lowerCamelCase__ )
lowerCamelCase_ = VQModel(**config.model.params )
if ckpt_path is None:
lowerCamelCase_ = "./model_checkpoints/vqgan_only.pt"
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
if ".ckpt" in ckpt_path:
lowerCamelCase_ = sd["state_dict"]
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
model.to(lowerCamelCase__ )
del sd
return model
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = model.encode(lowerCamelCase__ )
print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
lowerCamelCase_ = model.decode(lowerCamelCase__ )
return xrec
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
lowerCamelCase_ , lowerCamelCase_ = string.rsplit("." , 1 )
if reload:
lowerCamelCase_ = importlib.import_module(lowerCamelCase__ )
importlib.reload(lowerCamelCase__ )
return getattr(importlib.import_module(lowerCamelCase__ , package=lowerCamelCase__ ) , cls )
def lowerCamelCase_ ( lowerCamelCase__ ):
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True ):
lowerCamelCase_ = instantiate_from_config(lowerCamelCase__ )
if sd is not None:
model.load_state_dict(lowerCamelCase__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# load the specified checkpoint
if ckpt:
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )
lowerCamelCase_ = pl_sd["global_step"]
print(F'loaded model from global step {global_step}.' )
else:
lowerCamelCase_ = {"state_dict": None}
lowerCamelCase_ = None
lowerCamelCase_ = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=lowerCamelCase__ , eval_mode=lowerCamelCase__ )["model"]
return model, global_step
| 47
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_lowerCamelCase : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
_lowerCamelCase : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : int
_UpperCAmelCase : Node | None
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : Iterable[int] ):
'''simple docstring'''
_snake_case = None
for i in sorted(lowercase , reverse=lowercase ):
_snake_case = Node(lowercase , self.head )
def __iter__( self : List[str] ):
'''simple docstring'''
_snake_case = self.head
while node:
yield node.data
_snake_case = node.next_node
def __len__( self : List[str] ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self : str ):
'''simple docstring'''
return " -> ".join([str(lowercase ) for node in self] )
def a_ ( __lowercase : SortedLinkedList , __lowercase : SortedLinkedList ) -> SortedLinkedList:
return SortedLinkedList(list(__lowercase ) + list(__lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : str = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 282
|
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : int , lowercase : float = 0 ):
'''simple docstring'''
_snake_case , _snake_case = row, column
_snake_case = [[default_value for c in range(lowercase )] for r in range(lowercase )]
def __str__( self : int ):
'''simple docstring'''
_snake_case = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_snake_case = 0
for row_vector in self.array:
for obj in row_vector:
_snake_case = max(lowercase , len(str(lowercase ) ) )
_snake_case = f'''%{max_element_length}s'''
# Make string and return
def single_line(lowercase : list[float] ) -> str:
nonlocal string_format_identifier
_snake_case = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowercase ) for row_vector in self.array )
return s
def __repr__( self : Dict ):
'''simple docstring'''
return str(self )
def A ( self : str , lowercase : tuple[int, int] ):
'''simple docstring'''
if not (isinstance(lowercase , (list, tuple) ) and len(lowercase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , lowercase : tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : str , lowercase : tuple[int, int] , lowercase : float ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
_snake_case = value
def __add__( self : str , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert self.row == another.row and self.column == another.column
# Add
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ):
'''simple docstring'''
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = -self[r, c]
return result
def __sub__( self : List[str] , lowercase : Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self : Dict , lowercase : int | float | Matrix ):
'''simple docstring'''
if isinstance(lowercase , (int, float) ): # Scalar multiplication
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] * another
return result
elif isinstance(lowercase , lowercase ): # Matrix multiplication
assert self.column == another.row
_snake_case = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_snake_case = f'''Unsupported type given for another ({type(lowercase )})'''
raise TypeError(lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c]
return result
def A ( self : List[Any] , lowercase : Matrix , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_snake_case = v.transpose()
_snake_case = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ) -> None:
# a^(-1)
_snake_case = Matrix(3 , 3 , 0 )
for i in range(3 ):
_snake_case = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 1, 2, -3
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}''' )
def a_ ( ) -> None:
import doctest
doctest.testmod()
testa()
| 282
| 1
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 32
def __lowerCamelCase ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ) -> List[Any]:
snake_case = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCAmelCase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCAmelCase : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case = 16
elif accelerator.mixed_precision != "no":
snake_case = 8
else:
snake_case = None
return tokenizer.pad(
__lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
snake_case = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase )
snake_case = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ) -> Optional[Any]:
# Initialize accelerator
snake_case = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case = config["""lr"""]
snake_case = int(config["""num_epochs"""] )
snake_case = int(config["""seed"""] )
snake_case = int(config["""batch_size"""] )
snake_case = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
snake_case = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case = batch_size // MAX_GPU_BATCH_SIZE
snake_case = MAX_GPU_BATCH_SIZE
set_seed(__lowerCAmelCase )
snake_case , snake_case = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case = model.to(accelerator.device )
# Instantiate optimizer
snake_case = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
snake_case = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case , snake_case , snake_case , snake_case , snake_case = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case = model(**__lowerCAmelCase )
snake_case = outputs.loss
snake_case = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case = model(**__lowerCAmelCase )
snake_case = outputs.logits.argmax(dim=-1 )
snake_case , snake_case = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )
def __lowerCamelCase ( ) -> Dict:
snake_case = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
snake_case = parser.parse_args()
snake_case = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 3
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3
| 1
|
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : str = 100_0000 ):
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = {1: 1}
for inputa in range(2 , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 0
_UpperCAmelCase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_UpperCAmelCase = (3 * number) + 1
counter += 1
if inputa not in counters:
_UpperCAmelCase = counter
if counter > pre_counter:
_UpperCAmelCase = inputa
_UpperCAmelCase = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 260
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" )
os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""model""" ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""bert""" ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "new-model"
try:
AutoConfig.register("""new-model""" ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 296
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = """▁"""
_UpperCamelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
_UpperCamelCase = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
_UpperCamelCase = {"""vinai/bartpho-syllable""": 1024}
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
def __init__( self ,A ,A ,A="<s>" ,A="</s>" ,A="</s>" ,A="<s>" ,A="<unk>" ,A="<pad>" ,A="<mask>" ,A = None ,**A ,):
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
UpperCAmelCase = vocab_file
UpperCAmelCase = monolingual_vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase = {}
UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = cnt
cnt += 1
with open(A ,"""r""" ,encoding="""utf-8""" ) as f:
for line in f.readlines():
UpperCAmelCase = line.strip().split()[0]
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(A ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,A ):
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self ,A ,A = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self ,A ,A = None ,A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def _UpperCamelCase ( self ,A ,A = None ):
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCamelCase ( self ):
return len(self.fairseq_ids_to_tokens )
def _UpperCamelCase ( self ):
UpperCAmelCase = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self ,A ):
return self.sp_model.encode(A ,out_type=A )
def _UpperCamelCase ( self ,A ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _UpperCamelCase ( self ,A ):
return self.fairseq_ids_to_tokens[index]
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = """""".join(A ).replace(A ,""" """ ).strip()
return out_string
def _UpperCamelCase ( self ,A ,A = None ):
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"""wb""" ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(A )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,A )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A ,"""w""" ,encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(A )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 357
|
"""simple docstring"""
_UpperCamelCase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert len(str(_snake_case ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCAmelCase = year // 100
UpperCAmelCase = (5 * (century % 4) + 2) % 7
UpperCAmelCase = year % 100
UpperCAmelCase = centurian % 12
UpperCAmelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCAmelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCAmelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 32
def lowerCAmelCase__ ( _UpperCamelCase : Accelerator , _UpperCamelCase : int = 1_6 , _UpperCamelCase : str = "bert-base-cased" ) -> Any:
"""simple docstring"""
snake_case = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCamelCase : int ):
# max_length=None => use the model max length (it's actually the default)
snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=__SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCamelCase : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
model.eval()
snake_case = 0
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case = model(**__SCREAMING_SNAKE_CASE )
snake_case = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__SCREAMING_SNAKE_CASE ) - 1:
snake_case = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
snake_case = metric.compute()
return eval_metric["accuracy"]
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case = config['lr']
snake_case = int(config['num_epochs'] )
snake_case = int(config['seed'] )
snake_case = int(config['batch_size'] )
snake_case = args.model_name_or_path
set_seed(__SCREAMING_SNAKE_CASE )
snake_case = get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case = AutoModelForSequenceClassification.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
# Instantiate optimizer
snake_case = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case = optimizer_cls(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
snake_case = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
snake_case = 1
snake_case = (len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=__SCREAMING_SNAKE_CASE , )
else:
snake_case = DummyScheduler(__SCREAMING_SNAKE_CASE , total_num_steps=__SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
snake_case = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case = 0
snake_case = evaluate.load('glue' , 'mrpc' )
snake_case = num_epochs
if args.partial_train_epoch is not None:
snake_case = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case = args.resume_from_checkpoint.split('epoch_' )[1]
snake_case = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case = int(__SCREAMING_SNAKE_CASE ) + 1
snake_case = evaluation_loop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.print('resumed checkpoint performance:' , __SCREAMING_SNAKE_CASE )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
snake_case = json.load(__SCREAMING_SNAKE_CASE )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case = {}
for epoch in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case = model(**__SCREAMING_SNAKE_CASE )
snake_case = outputs.loss
snake_case = loss / gradient_accumulation_steps
accelerator.backward(__SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case = f"""epoch_{epoch}"""
snake_case = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
snake_case = evaluation_loop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case = accuracy
snake_case = lr_scheduler.get_lr()[0]
snake_case = optimizer.param_groups[0]['lr']
snake_case = epoch
snake_case = overall_step
accelerator.print(f"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , 'w' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( ) -> List[Any]:
"""simple docstring"""
snake_case = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=__SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--output_dir' , type=__SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=__SCREAMING_SNAKE_CASE , default=2 , help='Number of train epochs.' , )
snake_case = parser.parse_args()
snake_case = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 150
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase ( yaml.SafeLoader ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowercase_ : str = [tuple(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else key for key in keys]
lowercase_ : List[Any] = Counter(__UpperCamelCase )
lowercase_ : str = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = super().construct_mapping(__UpperCamelCase ,deep=__UpperCamelCase )
self._check_no_duplicates_on_constructed_node(__UpperCamelCase )
return mapping
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase_ : Dict = full_content[1:].index('---' ) + 1
lowercase_ : Optional[int] = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__SCREAMING_SNAKE_CASE )
class UpperCamelCase ( lowercase_ ):
# class attributes
lowercase = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> "DatasetMetadata":
'''simple docstring'''
with open(__UpperCamelCase ,encoding='utf-8' ) as readme_file:
lowercase_ , lowercase_ : Optional[int] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__UpperCamelCase )
else:
return cls()
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if path.exists():
with open(__UpperCamelCase ,encoding='utf-8' ) as readme_file:
lowercase_ : Dict = readme_file.read()
else:
lowercase_ : int = None
lowercase_ : Any = self._to_readme(__UpperCamelCase )
with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as readme_file:
readme_file.write(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase = None ) -> str:
'''simple docstring'''
if readme_content is not None:
lowercase_ , lowercase_ : Optional[Any] = _split_yaml_from_readme(__UpperCamelCase )
lowercase_ : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content
else:
lowercase_ : Tuple = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> "DatasetMetadata":
'''simple docstring'''
lowercase_ : List[str] = yaml.load(__UpperCamelCase ,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase_ : Dict = {
(key.replace('-' ,'_' ) if key.replace('-' ,'_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('_' ,'-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} ,sort_keys=__UpperCamelCase ,allow_unicode=__UpperCamelCase ,encoding='utf-8' ,).decode('utf-8' )
__SCREAMING_SNAKE_CASE ={
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__SCREAMING_SNAKE_CASE =ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__SCREAMING_SNAKE_CASE =ap.parse_args()
__SCREAMING_SNAKE_CASE =Path(args.readme_filepath)
__SCREAMING_SNAKE_CASE =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 213
| 0
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
if not sentence:
return ""
_snake_case = dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 270
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = hf_hub_url(repo_id=_SCREAMING_SNAKE_CASE , path=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE )
assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_SCREAMING_SNAKE_CASE )}"""
| 270
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MaskFormerFeatureExtractor"]
__a = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
__a = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 35
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = GPTSanJapaneseTokenizer
lowercase = False
lowercase = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self : str ):
super().setUp()
# fmt: off
snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(snake_case_ ) )
def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : str ):
snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def lowerCamelCase ( self : Any , snake_case_ : Dict ):
snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return text, ids
def lowerCamelCase ( self : Optional[Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : List[str] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case__ : Any = tokenizer.encode(snake_case_ )
snake_case__ : int = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Tuple = """こんにちは、世界。"""
snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
snake_case__ : str = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Dict = """こんにちは、世界。"""
snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
# fmt: off
snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case_ )
self.assertListEqual(x_token.token_type_ids , snake_case_ )
self.assertListEqual(x_token.attention_mask , snake_case_ )
self.assertListEqual(x_token_a.input_ids , snake_case_ )
self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
self.assertListEqual(x_token_a.attention_mask , snake_case_ )
def lowerCamelCase ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase ( self : List[str] ):
# tokenizer has no padding token
pass
| 35
| 1
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Any ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [2, 1, 2, -1]
_SCREAMING_SNAKE_CASE = [1, 2, 3, 4]
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(self.first_signal )
_SCREAMING_SNAKE_CASE = len(self.second_signal )
_SCREAMING_SNAKE_CASE = max(__lowerCamelCase , __lowerCamelCase )
# create a zero matrix of max_length x max_length
_SCREAMING_SNAKE_CASE = [[0] * max_length for i in range(__lowerCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = deque(self.second_signal )
rotated_signal.rotate(__lowerCamelCase )
for j, item in enumerate(__lowerCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
_SCREAMING_SNAKE_CASE = np.matmul(np.transpose(__lowerCamelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__lowerCamelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 361
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> int:
if n == 1 or not isinstance(__A , __A ):
return 0
elif n == 2:
return 1
else:
_SCREAMING_SNAKE_CASE = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> int:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 2
while digits < n:
index += 1
_SCREAMING_SNAKE_CASE = len(str(fibonacci(__A ) ) )
return index
def SCREAMING_SNAKE_CASE_ ( __A : int = 10_00 ) -> int:
return fibonacci_digits_index(__A )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 111
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = TextToVideoSDPipeline
snake_case__ = TEXT_TO_IMAGE_PARAMS
snake_case__ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
snake_case__ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def lowerCamelCase__ ( self : str ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
__lowerCamelCase : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
__lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
__lowerCamelCase : Dict = CLIPTextModel(UpperCAmelCase )
__lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCamelCase : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self : int , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : Tuple = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : str = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[str] = self.get_dummy_components()
__lowerCamelCase : Tuple = TextToVideoSDPipeline(**UpperCAmelCase )
__lowerCamelCase : str = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Any = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = """np"""
__lowerCamelCase : Any = sd_pipe(**UpperCAmelCase ).frames
__lowerCamelCase : str = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCamelCase : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self : Dict ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self : List[str] ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def lowerCamelCase__ ( self : List[Any] ):
pass
def lowerCamelCase__ ( self : Optional[int] ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
__lowerCamelCase : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
__lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCamelCase : Optional[int] = pipe.to("cuda" )
__lowerCamelCase : Optional[Any] = """Spiderman is surfing"""
__lowerCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCamelCase : Tuple = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
__lowerCamelCase : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
__lowerCamelCase : Any = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
__lowerCamelCase : Union[str, Any] = pipe.to("cuda" )
__lowerCamelCase : Optional[int] = """Spiderman is surfing"""
__lowerCamelCase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCamelCase : List[str] = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
__lowerCamelCase : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 135
|
"""simple docstring"""
import math
def lowerCamelCase ( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Dict = 2
__UpperCAmelCase : Union[str, Any] = int(math.sqrt(_UpperCamelCase ) ) # Size of every segment
__UpperCAmelCase : Tuple = [True] * (end + 1)
__UpperCAmelCase : int = []
while start <= end:
if temp[start] is True:
in_prime.append(_UpperCamelCase )
for i in range(start * start , end + 1 , _UpperCamelCase ):
__UpperCAmelCase : Dict = False
start += 1
prime += in_prime
__UpperCAmelCase : Optional[int] = end + 1
__UpperCAmelCase : Dict = min(2 * end , _UpperCamelCase )
while low <= n:
__UpperCAmelCase : Union[str, Any] = [True] * (high - low + 1)
for each in in_prime:
__UpperCAmelCase : Dict = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_UpperCamelCase , high + 1 , _UpperCamelCase ):
__UpperCAmelCase : Tuple = False
for j in range(len(_UpperCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
__UpperCAmelCase : Tuple = high + 1
__UpperCAmelCase : Optional[int] = min(high + end , _UpperCamelCase )
return prime
print(sieve(10**6))
| 115
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 61
|
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
UpperCAmelCase__ = update_area_of_max_square(SCREAMING_SNAKE_CASE__ , col + 1 )
UpperCAmelCase__ = update_area_of_max_square(row + 1 , col + 1 )
UpperCAmelCase__ = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE__ )
if mat[row][col]:
UpperCAmelCase__ = 1 + min([right, diagonal, down] )
UpperCAmelCase__ = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__ )
return sub_problem_sol
else:
return 0
UpperCAmelCase__ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
UpperCAmelCase__ = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if mat[row][col]:
UpperCAmelCase__ = 1 + min([right, diagonal, down] )
UpperCAmelCase__ = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = sub_problem_sol
return sub_problem_sol
else:
return 0
UpperCAmelCase__ = [0]
UpperCAmelCase__ = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE__ )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE__ )
return largest_square_area[0]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ):
'''simple docstring'''
UpperCAmelCase__ = [[0] * (cols + 1) for _ in range(rows + 1 )]
UpperCAmelCase__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
UpperCAmelCase__ = dp_array[row][col + 1]
UpperCAmelCase__ = dp_array[row + 1][col + 1]
UpperCAmelCase__ = dp_array[row + 1][col]
if mat[row][col] == 1:
UpperCAmelCase__ = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = max(dp_array[row][col] , SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = 0
return largest_square_area
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ):
'''simple docstring'''
UpperCAmelCase__ = [0] * (cols + 1)
UpperCAmelCase__ = [0] * (cols + 1)
UpperCAmelCase__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
UpperCAmelCase__ = current_row[col + 1]
UpperCAmelCase__ = next_row[col + 1]
UpperCAmelCase__ = next_row[col]
if mat[row][col] == 1:
UpperCAmelCase__ = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = max(current_row[col] , SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = 0
UpperCAmelCase__ = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 61
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self) -> Optional[Any]:
snake_case_ = (3, 32, 128)
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase__) + '\n')
snake_case_ = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 128},
}
snake_case_ = os.path.join(self.tmpdirname, lowerCAmelCase__)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Tuple:
shutil.rmtree(self.tmpdirname)
def a_ ( self) -> Any:
snake_case_ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)
snake_case_ = Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1))
return image_input
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor.save_pretrained(self.tmpdirname)
snake_case_ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__)
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer, lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor.save_pretrained(self.tmpdirname)
snake_case_ = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
snake_case_ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCAmelCase__, padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer, lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Any:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np')
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self) -> str:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'test'
snake_case_ = processor(text=lowerCAmelCase__)
snake_case_ = tokenizer(lowerCAmelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def a_ ( self) -> Tuple:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'test'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def a_ ( self) -> List[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.char_decode(lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
snake_case_ = [seq.replace(' ', '') for seq in decoded_tok]
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = None
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def a_ ( self) -> List[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = torch.randn(1, 27, 38)
snake_case_ = torch.randn(1, 27, 5_0257)
snake_case_ = torch.randn(1, 27, 3_0522)
snake_case_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 69
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def __UpperCamelCase ( _A : Dict=None ) ->Dict:
"""simple docstring"""
if subparsers is not None:
lowerCamelCase_ =subparsers.add_parser("""tpu-config""" , description=_description )
else:
lowerCamelCase_ =argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
lowerCamelCase_ =parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=_A , default=_A , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=_A , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=_A , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
lowerCamelCase_ =parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_A , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=_A )
return parser
def __UpperCamelCase ( _A : Tuple ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_A ):
lowerCamelCase_ =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCamelCase_ =defaults.command_file
if not args.command and defaults.commands is not None:
lowerCamelCase_ =defaults.commands
if not args.tpu_name:
lowerCamelCase_ =defaults.tpu_name
if not args.tpu_zone:
lowerCamelCase_ =defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCamelCase_ ="""git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
lowerCamelCase_ ="""accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , _A ):
lowerCamelCase_ =f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
lowerCamelCase_ =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _A ):
lowerCamelCase_ =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCamelCase_ =["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowerCamelCase_ ="""; """.join(_A )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCamelCase_ =["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(_A )}' )
return
subprocess.run(_A )
print("""Successfully setup pod.""" )
def __UpperCamelCase ( ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =tpu_command_parser()
lowerCamelCase_ =parser.parse_args()
tpu_command_launcher(_A )
| 154
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''wav2vec2'''
def __init__( self , snake_case=32 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=1e-5 , snake_case="group" , snake_case="gelu" , snake_case=(512, 512, 512, 512, 512, 512, 512) , snake_case=(5, 2, 2, 2, 2, 2, 2) , snake_case=(10, 3, 3, 3, 3, 2, 2) , snake_case=False , snake_case=128 , snake_case=16 , snake_case=False , snake_case=True , snake_case=0.05 , snake_case=10 , snake_case=2 , snake_case=0.0 , snake_case=10 , snake_case=0 , snake_case=320 , snake_case=2 , snake_case=0.1 , snake_case=100 , snake_case=256 , snake_case=256 , snake_case=0.1 , snake_case="sum" , snake_case=False , snake_case=False , snake_case=256 , snake_case=(512, 512, 512, 512, 1500) , snake_case=(5, 3, 3, 1, 1) , snake_case=(1, 2, 3, 1, 1) , snake_case=512 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=False , snake_case=3 , snake_case=2 , snake_case=3 , snake_case=None , snake_case=None , **snake_case , ):
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(snake_case )
snake_case_ = list(snake_case )
snake_case_ = list(snake_case )
snake_case_ = conv_bias
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = vocab_size
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = feat_quantizer_dropout
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# adapter
snake_case_ = add_adapter
snake_case_ = adapter_kernel_size
snake_case_ = adapter_stride
snake_case_ = num_adapter_layers
snake_case_ = output_hidden_size or hidden_size
snake_case_ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(snake_case )
snake_case_ = list(snake_case )
snake_case_ = list(snake_case )
snake_case_ = xvector_output_dim
@property
def a ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 200
|
from __future__ import annotations
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = []
create_all_state(1 , UpperCamelCase__ , UpperCamelCase__ , [] , UpperCamelCase__ )
return result
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCamelCase__ , total_number - level + 2 ):
current_list.append(UpperCamelCase__ )
create_all_state(i + 1 , UpperCamelCase__ , level - 1 , UpperCamelCase__ , UpperCamelCase__ )
current_list.pop()
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
for i in total_list:
print(*UpperCamelCase__ )
if __name__ == "__main__":
_UpperCAmelCase : str = 4
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
| 200
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Dict ):
"""simple docstring"""
snake_case_ = {}
def snake_case__ ( self : List[Any] , __lowercase : str ):
"""simple docstring"""
snake_case_ = {}
def snake_case__ ( self : int , __lowercase : str , __lowercase : str , __lowercase : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__lowercase )
if nodea not in self.connections:
self.add_node(__lowercase )
snake_case_ = probability
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def snake_case__ ( self : Tuple , __lowercase : str ):
"""simple docstring"""
snake_case_ = 0
snake_case_ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_A , _A , _A )
snake_case_ = Counter(graph.get_nodes() )
snake_case_ = start
for _ in range(_A ):
snake_case_ = graph.transition(_A )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187
|
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Dict ):
"""simple docstring"""
snake_case_ = {} # Mapping from char to TrieNode
snake_case_ = False
def snake_case__ ( self : Dict , __lowercase : list[str] ):
"""simple docstring"""
for word in words:
self.insert(__lowercase )
def snake_case__ ( self : List[str] , __lowercase : str ):
"""simple docstring"""
snake_case_ = self
for char in word:
if char not in curr.nodes:
snake_case_ = TrieNode()
snake_case_ = curr.nodes[char]
snake_case_ = True
def snake_case__ ( self : List[Any] , __lowercase : str ):
"""simple docstring"""
snake_case_ = self
for char in word:
if char not in curr.nodes:
return False
snake_case_ = curr.nodes[char]
return curr.is_leaf
def snake_case__ ( self : Optional[Any] , __lowercase : str ):
"""simple docstring"""
def _delete(__lowercase : TrieNode , __lowercase : str , __lowercase : int ) -> bool:
if index == len(__lowercase ):
# If word does not exist
if not curr.is_leaf:
return False
snake_case_ = False
return len(curr.nodes ) == 0
snake_case_ = word[index]
snake_case_ = curr.nodes.get(__lowercase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
snake_case_ = _delete(__lowercase , __lowercase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , __lowercase , 0 )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
if node.is_leaf:
print(_A , end=" " )
for key, value in node.nodes.items():
print_words(_A , word + key )
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = "banana bananas bandana band apple all beast".split()
snake_case_ = TrieNode()
root.insert_many(_A )
# print_words(root, "")
assert all(root.find(_A ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
print(str(_A ) , "works!" if passes else "doesn't work :(" )
def lowerCamelCase__ ( ):
'''simple docstring'''
assert test_trie()
def lowerCamelCase__ ( ):
'''simple docstring'''
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
| 187
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class A__ ( A__ ):
def __init__( self : Dict , _a : Optional[NestedDataStructureLike[PathLike]] = None , _a : Optional[NamedSplit] = None , _a : Optional[Features] = None , _a : str = None , _a : bool = False , _a : bool = False , _a : Optional[int] = None , **_a : Union[str, Any] , ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =path_or_paths
_SCREAMING_SNAKE_CASE =split if split or isinstance(_a , _a ) else 'train'
_SCREAMING_SNAKE_CASE =features
_SCREAMING_SNAKE_CASE =cache_dir
_SCREAMING_SNAKE_CASE =keep_in_memory
_SCREAMING_SNAKE_CASE =streaming
_SCREAMING_SNAKE_CASE =num_proc
_SCREAMING_SNAKE_CASE =kwargs
@abstractmethod
def A ( self : Optional[int] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class A__ ( A__ ):
def __init__( self : Union[str, Any] , _a : Optional[Features] = None , _a : str = None , _a : bool = False , _a : bool = False , _a : Optional[int] = None , **_a : Tuple , ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =features
_SCREAMING_SNAKE_CASE =cache_dir
_SCREAMING_SNAKE_CASE =keep_in_memory
_SCREAMING_SNAKE_CASE =streaming
_SCREAMING_SNAKE_CASE =num_proc
_SCREAMING_SNAKE_CASE =kwargs
@abstractmethod
def A ( self : List[str] ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 114
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114
| 1
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCamelCase__ (_UpperCAmelCase):
return 1 / (1 + np.exp(-z))
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return (-y * np.log(_UpperCAmelCase) - (1 - y) * np.log(1 - h)).mean()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = np.dot(_UpperCAmelCase , _UpperCAmelCase)
return np.sum(y * scores - np.log(1 + np.exp(_UpperCAmelCase)))
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=7_0000):
SCREAMING_SNAKE_CASE = np.zeros(x.shape[1])
for iterations in range(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = np.dot(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = sigmoid_function(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = np.dot(x.T , h - y) / y.size
SCREAMING_SNAKE_CASE = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE = np.dot(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = sigmoid_function(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = cost_function(_UpperCAmelCase , _UpperCAmelCase)
if iterations % 100 == 0:
print(F'''loss: {j} \t''') # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
a_ : Union[str, Any] = datasets.load_iris()
a_ : List[str] = iris.data[:, :2]
a_ : List[str] = (iris.target != 0) * 1
a_ : List[Any] = 0.1
a_ : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def lowerCamelCase__ (_UpperCAmelCase):
return sigmoid_function(
np.dot(_UpperCAmelCase , _UpperCAmelCase)) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) : int = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) : List[str] = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) : Tuple = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ : Optional[Any] = np.c_[xxa.ravel(), xxa.ravel()]
a_ : str = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 137
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ : List[Any] = logging.get_logger(__name__)
a_ : str = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : Any = '''deberta-v2'''
def __init__( self , a=12_8100 , a=1536 , a=24 , a=24 , a=6144 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=0 , a=0.02 , a=1E-7 , a=False , a=-1 , a=0 , a=True , a=None , a=0 , a="gelu" , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = relative_attention
SCREAMING_SNAKE_CASE = max_relative_positions
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = position_biased_input
# Backwards compatibility
if type(a) == str:
SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split('|')]
SCREAMING_SNAKE_CASE = pos_att_type
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = kwargs.get('pooler_hidden_size' , a)
SCREAMING_SNAKE_CASE = pooler_dropout
SCREAMING_SNAKE_CASE = pooler_hidden_act
class _snake_case ( A__ ):
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , a , a = -1 , a = -1 , a = -1 , a = False , a = None , a = 3 , a = 40 , a = 40 , a = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=a , framework=a)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 137
| 1
|
'''simple docstring'''
def lowercase__( __UpperCamelCase: list[list[float]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[list[float]] = []
for data in source_data:
for i, el in enumerate(__UpperCamelCase ):
if len(__UpperCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(__UpperCamelCase ) )
return data_lists
def lowercase__( __UpperCamelCase: list[list[float]] ,__UpperCamelCase: list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[list[float]] = []
for dlist, weight in zip(__UpperCamelCase ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = min(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = max(__UpperCamelCase )
SCREAMING_SNAKE_CASE : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
SCREAMING_SNAKE_CASE : List[Any] = f"Invalid weight of {weight:f} provided"
raise ValueError(__UpperCamelCase )
score_lists.append(__UpperCamelCase )
return score_lists
def lowercase__( __UpperCamelCase: list[list[float]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Any = final_scores[j] + ele
return final_scores
def lowercase__( __UpperCamelCase: list[list[float]] ,__UpperCamelCase: list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = get_data(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = calculate_each_score(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = generate_final_scores(__UpperCamelCase )
# append scores to source data
for i, ele in enumerate(__UpperCamelCase ):
source_data[i].append(__UpperCamelCase )
return source_data
| 363
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=2, A=56, A=True, A=True, A=True, A=True, A=99, A=32, A=2, A=2, A=7, A="gelu_new", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=4, A="block_sparse", A=True, A=False, A=2, A=3, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_attention_mask
SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : int = rescale_embeddings
SCREAMING_SNAKE_CASE : Any = attention_type
SCREAMING_SNAKE_CASE : str = use_bias
SCREAMING_SNAKE_CASE : Tuple = block_size
SCREAMING_SNAKE_CASE : List[Any] = num_random_blocks
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = BigBirdConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
A : List[Any] = False
A : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A )
SCREAMING_SNAKE_CASE : List[str] = model_class(A )
@jax.jit
def model_jitted(A, A=None, **A ):
return model(input_ids=A, attention_mask=A, **A )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE : List[str] = model_jitted(**A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Optional[Any] = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ), len(A ) )
for jitted_output, output in zip(A, A ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCamelCase_ ( self, A, A, A, A=1E-5, A="outputs", A=None ):
'''simple docstring'''
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(A, A, A, A, A, A )
| 246
| 0
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCAmelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCAmelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = len([g for position, g in enumerate(UpperCamelCase__ ) if g == main_target[position]] )
return (item, float(UpperCamelCase__ ))
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = random.randint(0 , len(UpperCamelCase__ ) - 1 )
lowercase = parent_a[:random_slice] + parent_a[random_slice:]
lowercase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = list(UpperCamelCase__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase = random.choice(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
lowercase = []
# Generate more children proportionally to the fitness score.
lowercase = int(parent_a[1] * 100 ) + 1
lowercase = 10 if child_n >= 10 else child_n
for _ in range(UpperCamelCase__ ):
lowercase = population_score[random.randint(0 , UpperCamelCase__ )][0]
lowercase , lowercase = crossover(parent_a[0] , UpperCamelCase__ )
# Append new string to the population list.
pop.append(mutate(UpperCamelCase__ , UpperCamelCase__ ) )
pop.append(mutate(UpperCamelCase__ , UpperCamelCase__ ) )
return pop
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True ):
if N_POPULATION < N_SELECTED:
lowercase = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(UpperCamelCase__ )
# Verify that the target contains no genes besides the ones inside genes variable.
lowercase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(UpperCamelCase__ )
# Generate random starting population.
lowercase = []
for _ in range(UpperCamelCase__ ):
population.append(''.join([random.choice(UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) )] ) )
# Just some logs to know what the algorithms is doing.
lowercase , lowercase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(UpperCamelCase__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase = [evaluate(UpperCamelCase__ , UpperCamelCase__ ) for item in population]
# Check if there is a matching evolution.
lowercase = sorted(UpperCamelCase__ , key=lambda __SCREAMING_SNAKE_CASE : x[1] , reverse=UpperCamelCase__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowercase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(UpperCamelCase__ )
# Normalize population score to be between 0 and 1.
lowercase = [
(item, score / len(UpperCamelCase__ )) for item, score in population_score
]
# This is selection
for i in range(UpperCamelCase__ ):
population.extend(select(population_score[int(UpperCamelCase__ )] , UpperCamelCase__ , UpperCamelCase__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(UpperCamelCase__ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCAmelCase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
UpperCAmelCase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 195
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=10 , lowerCamelCase__=0.02 , lowerCamelCase__="divided_space_time" , lowerCamelCase__=None , ) -> Any:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = num_frames
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = attention_type
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
__lowerCamelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__lowerCamelCase = (image_size // patch_size) ** 2
__lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__lowerCamelCase = self.num_labels
return config
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = TimesformerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = TimesformerForVideoClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify the logits shape
__lowerCamelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = TimesformerModelTester(self )
__lowerCamelCase = ConfigTester(
self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> int:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(lowerCamelCase__ )
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
return inputs_dict
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TimesformerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
for model_class in self.all_model_classes:
__lowerCamelCase = self.model_tester.seq_length
__lowerCamelCase = self.model_tester.num_frames
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__lowerCamelCase = len(lowerCamelCase__ )
# Check attention is always last and order is fine
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCamelCase__ ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
__lowerCamelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
__lowerCamelCase = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_video()
__lowerCamelCase = image_processor(video[:8] , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 90
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __snake_case ( snake_case_ ):
__lowerCamelCase : Optional[int] = """time_series_transformer"""
__lowerCamelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = [1, 2, 3, 4, 5, 6, 7] , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 64 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , **snake_case__ , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Dict =prediction_length
UpperCAmelCase : int =context_length or prediction_length
UpperCAmelCase : str =distribution_output
UpperCAmelCase : Union[str, Any] =loss
UpperCAmelCase : Any =input_size
UpperCAmelCase : List[Any] =num_time_features
UpperCAmelCase : Any =lags_sequence
UpperCAmelCase : Union[str, Any] =scaling
UpperCAmelCase : Dict =num_dynamic_real_features
UpperCAmelCase : str =num_static_real_features
UpperCAmelCase : List[str] =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase : int =cardinality
else:
UpperCAmelCase : List[Any] =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase : Union[str, Any] =embedding_dimension
else:
UpperCAmelCase : Optional[Any] =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Union[str, Any] =num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[Any] =input_size * len(snake_case__ ) + self._number_of_features
UpperCAmelCase : int =d_model
UpperCAmelCase : Dict =encoder_attention_heads
UpperCAmelCase : int =decoder_attention_heads
UpperCAmelCase : Optional[Any] =encoder_ffn_dim
UpperCAmelCase : int =decoder_ffn_dim
UpperCAmelCase : Dict =encoder_layers
UpperCAmelCase : Tuple =decoder_layers
UpperCAmelCase : Optional[Any] =dropout
UpperCAmelCase : int =attention_dropout
UpperCAmelCase : Dict =activation_dropout
UpperCAmelCase : Optional[int] =encoder_layerdrop
UpperCAmelCase : int =decoder_layerdrop
UpperCAmelCase : str =activation_function
UpperCAmelCase : Optional[int] =init_std
UpperCAmelCase : Tuple =use_cache
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 353
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.json'''}
__snake_case = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__snake_case = {'''mgp-str''': 27}
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , snake_case__ , snake_case__="[GO]" , snake_case__="[GO]" , snake_case__="[s]" , snake_case__="[GO]" , **snake_case__ ) -> Any:
'''simple docstring'''
super().__init__(
unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , **snake_case__ , )
with open(snake_case__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase : int =json.load(snake_case__ )
UpperCAmelCase : List[str] ={v: k for k, v in self.vocab.items()}
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =[]
for s in text:
char_tokens.extend(snake_case__ )
return char_tokens
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(snake_case__ ) )
return
UpperCAmelCase : List[Any] =os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '''\n''' )
return (vocab_file,)
| 78
| 0
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Dict:
'''simple docstring'''
A__ = []
A__ = []
A__ = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
A__ = len(SCREAMING_SNAKE_CASE_ ) if (len(SCREAMING_SNAKE_CASE_ ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(SCREAMING_SNAKE_CASE_ ) , "Postfix".center(SCREAMING_SNAKE_CASE_ ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(SCREAMING_SNAKE_CASE_ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(SCREAMING_SNAKE_CASE_ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(SCREAMING_SNAKE_CASE_ ) == 0:
stack.append(SCREAMING_SNAKE_CASE_ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(SCREAMING_SNAKE_CASE_ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(SCREAMING_SNAKE_CASE_ ) # push x to stack
print(
x.center(8 ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , sep=" | " , ) # Output in tabular format
while len(SCREAMING_SNAKE_CASE_ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , sep=" | " , ) # Output in tabular format
return "".join(SCREAMING_SNAKE_CASE_ ) # return Postfix as str
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> List[str]:
'''simple docstring'''
A__ = list(infix[::-1] ) # reverse the infix equation
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
if infix[i] == "(":
A__ = ")" # change "(" to ")"
elif infix[i] == ")":
A__ = "(" # change ")" to "("
return (infix_2_postfix("".join(SCREAMING_SNAKE_CASE_ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
lowerCAmelCase__ = input("""\nEnter an Infix Equation = """) # Input an Infix equation
lowerCAmelCase__ = """""".join(Infix.split()) # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 68
|
'''simple docstring'''
import os
from math import logaa
def __lowerCamelCase ( __snake_case : str = "base_exp.txt" ) -> int:
"""simple docstring"""
A__ : float =0
A__ : Optional[int] =0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ), __snake_case ) ) ):
A__ , A__ : Union[str, Any] =list(map(__snake_case, line.split(""",""" ) ) )
if x * logaa(__snake_case ) > largest:
A__ : List[str] =x * logaa(__snake_case )
A__ : Any =i + 1
return result
if __name__ == "__main__":
print(solution())
| 134
| 0
|
from __future__ import annotations
import os
from typing import Any
import requests
A : Any = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
A : List[str] = BASE_URL + '/user'
# https://github.com/settings/tokens
A : Optional[int] = os.environ.get('USER_TOKEN', '')
def UpperCamelCase ( __magic_name__ : str ) -> dict[Any, Any]:
"""simple docstring"""
lowercase__ = {
"""Authorization""": f'''token {auth_token}''',
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__magic_name__ , headers=__magic_name__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 351
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A : List[Any] = logging.get_logger(__name__)
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 146
| 0
|
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( a_ : list[float] , a_ : list[float] ) -> float:
__SCREAMING_SNAKE_CASE :Any = sorted(numsa + numsa )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = divmod(len(a_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = [float(x) for x in input("Enter the elements of first array: ").split()]
lowerCamelCase_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 191
|
"""simple docstring"""
def __lowerCamelCase ( a_ : int , a_ : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :Optional[int] = [1]
for i in range(2 , a_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__SCREAMING_SNAKE_CASE :List[str] = []
__SCREAMING_SNAKE_CASE :Optional[Any] = list(range(a_ ) )
# Find permutation
while factorials:
__SCREAMING_SNAKE_CASE :Optional[int] = factorials.pop()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = divmod(a_ , a_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {'vocab_file': 'vocab.txt'}
lowerCamelCase__ : Optional[Any] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCamelCase__ : List[str] = {
'openbmb/cpm-ant-10b': 1_024,
}
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = collections.OrderedDict()
with open(__UpperCAmelCase , 'r' , encoding='utf-8' ) as reader:
SCREAMING_SNAKE_CASE_ = reader.readlines()
for index, token in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = token.rstrip('\n' )
SCREAMING_SNAKE_CASE_ = index
return vocab
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple="<unk>" , _lowerCAmelCase : Optional[Any]=200 ):
SCREAMING_SNAKE_CASE_ = vocab
SCREAMING_SNAKE_CASE_ = unk_token
SCREAMING_SNAKE_CASE_ = max_input_chars_per_word
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
if len(_lowerCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = []
while start < len(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = None
while start < end:
SCREAMING_SNAKE_CASE_ = ''.join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE_ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = end
return sub_tokens
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
lowercase_ = False
def __init__( self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]="<d>" , _lowerCAmelCase : str="</d>" , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : str="</s>" , _lowerCAmelCase : Any="<pad>" , _lowerCAmelCase : Any="<unk>" , _lowerCAmelCase : Optional[int]="</n>" , _lowerCAmelCase : str="</_>" , _lowerCAmelCase : Tuple="left" , **_lowerCAmelCase : Tuple , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=_lowerCAmelCase , eod_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , line_token=_lowerCAmelCase , space_token=_lowerCAmelCase , padding_side=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = bod_token
SCREAMING_SNAKE_CASE_ = eod_token
SCREAMING_SNAKE_CASE_ = load_vocab(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.encoder[space_token]
SCREAMING_SNAKE_CASE_ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
SCREAMING_SNAKE_CASE_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _lowerCAmelCase : x[1] ) )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase_ ( self : Any ):
return self.encoder[self.bod_token]
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.encoder[self.eod_token]
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.encoder["\n"]
@property
def lowerCAmelCase_ ( self : Any ):
return len(self.encoder )
def lowerCAmelCase_ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = []
for x in jieba.cut(_lowerCAmelCase , cut_all=_lowerCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_lowerCAmelCase ) )
return output_tokens
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , **_lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE_ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] ):
return token in self.encoder
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[str] ):
return "".join(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Optional[int] ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if os.path.isdir(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
SCREAMING_SNAKE_CASE_ = (filename_prefix + '-' if filename_prefix else '') + save_directory
SCREAMING_SNAKE_CASE_ = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE_ = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE_ = self.encoder['\n']
del self.encoder["\n"]
SCREAMING_SNAKE_CASE_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _lowerCAmelCase : x[1] ) )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[int] , _lowerCAmelCase : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None , _lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase ))
return [1] + ([0] * len(_lowerCAmelCase ))
| 210
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
@abstractmethod
def lowerCAmelCase_ ( _lowerCAmelCase : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase_ ( self : Dict ):
raise NotImplementedError()
| 210
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["input_features", "is_longer"]
def __init__( self : str , A : int=64 , A : Dict=48000 , A : str=480 , A : List[Any]=10 , A : Optional[Any]=1024 , A : Tuple=0.0 , A : List[Any]=False , A : float = 0 , A : float = 14000 , A : int = None , A : str = "fusion" , A : str = "repeatpad" , **A : Dict , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
_UpperCAmelCase : Optional[Any] = top_db
_UpperCAmelCase : Dict = truncation
_UpperCAmelCase : List[Any] = padding
_UpperCAmelCase : Optional[Any] = fft_window_size
_UpperCAmelCase : Dict = (fft_window_size >> 1) + 1
_UpperCAmelCase : Any = hop_length
_UpperCAmelCase : Tuple = max_length_s
_UpperCAmelCase : str = max_length_s * sampling_rate
_UpperCAmelCase : Any = sampling_rate
_UpperCAmelCase : Optional[int] = frequency_min
_UpperCAmelCase : str = frequency_max
_UpperCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm=A , mel_scale="htk" , )
_UpperCAmelCase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm="slaney" , mel_scale="slaney" , )
def _A ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _A ( self : Optional[Any] , A : np.array , A : Optional[np.array] = None ):
_UpperCAmelCase : Dict = spectrogram(
A , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=A , log_mel="dB" , )
return log_mel_spectrogram.T
def _A ( self : str , A : str , A : List[str] , A : List[Any] ):
_UpperCAmelCase : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Optional[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Tuple = [0]
# randomly choose index for each part
_UpperCAmelCase : Dict = np.random.choice(ranges[0] )
_UpperCAmelCase : str = np.random.choice(ranges[1] )
_UpperCAmelCase : Tuple = np.random.choice(ranges[2] )
_UpperCAmelCase : str = mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase : str = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase : Dict = torch.tensor(mel[None, None, :] )
_UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate(
A , size=[chunk_frames, 64] , mode="bilinear" , align_corners=A )
_UpperCAmelCase : List[str] = mel_shrink[0][0].numpy()
_UpperCAmelCase : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _A ( self : List[Any] , A : np.array , A : List[str] , A : Any , A : Optional[int] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase : str = len(A ) - max_length
_UpperCAmelCase : str = np.random.randint(0 , overflow + 1 )
_UpperCAmelCase : int = waveform[idx : idx + max_length]
_UpperCAmelCase : Any = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCAmelCase : Tuple = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCAmelCase : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase : Any = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCAmelCase : int = False
else:
_UpperCAmelCase : Tuple = self._random_mel_fusion(A , A , A )
_UpperCAmelCase : Any = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_UpperCAmelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase : str = int(max_length / len(A ) )
_UpperCAmelCase : Dict = np.stack(np.tile(A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCAmelCase : Dict = int(max_length / len(A ) )
_UpperCAmelCase : List[str] = np.stack(np.tile(A , A ) )
_UpperCAmelCase : Optional[Any] = np.pad(A , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
_UpperCAmelCase : str = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCAmelCase : List[str] = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : str = None , A : Optional[str] = None , A : Optional[int] = None , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , **A : List[str] , ):
_UpperCAmelCase : int = truncation if truncation is not None else self.truncation
_UpperCAmelCase : Optional[int] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Any = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : Optional[Any] = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : int = [np.asarray(A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
_UpperCAmelCase : List[str] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[str] = [np.asarray(A )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase : Dict = [
self._get_input_mel(A , max_length if max_length else self.nb_max_samples , A , A )
for waveform in raw_speech
]
_UpperCAmelCase : int = []
_UpperCAmelCase : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(A )
is_longer.append(A )
if truncation == "fusion" and sum(A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase : Union[str, Any] = np.random.randint(0 , len(A ) )
_UpperCAmelCase : Optional[Any] = True
if isinstance(input_mel[0] , A ):
_UpperCAmelCase : List[str] = [np.asarray(A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase : Tuple = [[longer] for longer in is_longer]
_UpperCAmelCase : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
_UpperCAmelCase : Tuple = BatchFeature(A )
if return_tensors is not None:
_UpperCAmelCase : List[Any] = input_features.convert_to_tensors(A )
return input_features
| 31
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : List[str] = '''true'''
def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=16 ):
set_seed(42 )
lowerCAmelCase__ : Union[str, Any] = RegressionModel()
lowerCAmelCase__ : Optional[int] = deepcopy(A_ )
lowerCAmelCase__ : Any = RegressionDataset(length=A_ )
lowerCAmelCase__ : List[str] = DataLoader(A_ , batch_size=A_ )
model.to(accelerator.device )
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = accelerator.prepare(A_ , A_ )
return model, ddp_model, dataloader
def __SCREAMING_SNAKE_CASE ( A_ , A_=False ):
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowerCAmelCase__ : List[str] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(A_ ):
lowerCAmelCase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A_ , max_length=A_ )
return outputs
with accelerator.main_process_first():
lowerCAmelCase__ : Dict = dataset.map(
A_ , batched=A_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A_ ):
if use_longest:
return tokenizer.pad(A_ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(A_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return DataLoader(A_ , shuffle=A_ , collate_fn=A_ , batch_size=16 )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = Accelerator(dispatch_batches=A_ , split_batches=A_ )
lowerCAmelCase__ : str = get_dataloader(A_ , not dispatch_batches )
lowerCAmelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(A_ , A_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = []
for batch in dataloader:
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = batch.values()
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase__ ,lowerCAmelCase__ : int = [], []
for logit, targ in logits_and_targets:
logits.append(A_ )
targs.append(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = torch.cat(A_ ), torch.cat(A_ )
return logits, targs
def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=False , A_=False , A_=16 ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = get_basic_setup(A_ , A_ , A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = generate_predictions(A_ , A_ , A_ )
assert (
len(A_ ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A_ )}'
def __SCREAMING_SNAKE_CASE ( A_ = False , A_ = False ):
lowerCAmelCase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = get_mrpc_setup(A_ , A_ )
# First do baseline
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = setup['''no''']
model.to(A_ )
model.eval()
for batch in dataloader:
batch.to(A_ )
with torch.inference_mode():
lowerCAmelCase__ : Optional[int] = model(**A_ )
lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A_ , references=batch['''labels'''] )
lowerCAmelCase__ : Dict = metric.compute()
# Then do distributed
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase__ : Union[str, Any] = model(**A_ )
lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : int = batch['''labels''']
lowerCAmelCase__ ,lowerCAmelCase__ : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A_ , references=A_ )
lowerCAmelCase__ : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(A_ , A_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase__ : Optional[Any] = Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(A_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowerCAmelCase__ : List[str] = Accelerator()
test_torch_metrics(A_ , 5_12 )
accelerator.state._reset_state()
def __SCREAMING_SNAKE_CASE ( A_ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 106
| 0
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
SCREAMING_SNAKE_CASE__ = """
import os
"""
SCREAMING_SNAKE_CASE__ = """
def foo():
import os
return False
"""
SCREAMING_SNAKE_CASE__ = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
SCREAMING_SNAKE_CASE__ = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = """
import os
try:
import bar
except:
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
SCREAMING_SNAKE_CASE__ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , __lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = os.path.join(__lowerCamelCase , "test_file.py" )
with open(__lowerCamelCase , "w" ) as _tmp_file:
_tmp_file.write(__lowerCamelCase )
lowercase_ = get_imports(__lowerCamelCase )
assert parsed_imports == ["os"]
| 367
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 297
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : Tuple = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["BeitFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Dict = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Dict = "bridgetower_vision_model"
def __init__( self , a__=768 , a__=12 , a__=3 , a__=16 , a__=288 , a__=1 , a__=1e-05 , a__=False , a__=True , a__=False , **a__ , ) -> int:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = image_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = stop_gradient
snake_case_ = share_layernorm
snake_case_ = remove_last_layer
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "bridgetower_text_model"
def __init__( self , a__=50_265 , a__=768 , a__=12 , a__=12 , a__=1 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , **a__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = initializer_factor
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Union[str, Any] = "bridgetower"
def __init__( self , a__=True , a__="gelu" , a__=768 , a__=1 , a__=1e-05 , a__=False , a__="add" , a__=12 , a__=6 , a__=False , a__=False , a__=None , a__=None , **a__ , ) -> int:
'''simple docstring'''
snake_case_ = kwargs.pop("text_config_dict" , a__ )
snake_case_ = kwargs.pop("vision_config_dict" , a__ )
super().__init__(**a__ )
snake_case_ = share_cross_modal_transformer_layers
snake_case_ = hidden_act
snake_case_ = hidden_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = share_link_tower_layers
snake_case_ = link_tower_type
snake_case_ = num_attention_heads
snake_case_ = num_hidden_layers
snake_case_ = tie_word_embeddings
snake_case_ = init_layernorm_from_vision_encoder
if text_config is None:
snake_case_ = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
snake_case_ = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
snake_case_ = BridgeTowerTextConfig(**a__ )
snake_case_ = BridgeTowerVisionConfig(**a__ )
@classmethod
def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.text_config.to_dict()
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCAmelCase : int = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __lowerCAmelCase ( _UpperCamelCase):
_lowercase : Dict = 'ernie_m'
_lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , lowerCAmelCase__ = 2_5_0_0_0_2 , lowerCAmelCase__ = 7_6_8 , lowerCAmelCase__ = 1_2 , lowerCAmelCase__ = 1_2 , lowerCAmelCase__ = 3_0_7_2 , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 5_1_4 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1E-05 , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=0.0 , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a__ : List[str] =vocab_size
a__ : List[str] =hidden_size
a__ : List[Any] =num_hidden_layers
a__ : Tuple =num_attention_heads
a__ : Union[str, Any] =intermediate_size
a__ : Any =hidden_act
a__ : Any =hidden_dropout_prob
a__ : int =attention_probs_dropout_prob
a__ : str =max_position_embeddings
a__ : Any =initializer_range
a__ : Union[str, Any] =layer_norm_eps
a__ : Union[str, Any] =classifier_dropout
a__ : List[Any] =is_decoder
a__ : int =act_dropout
| 368
|
from __future__ import annotations
import math
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if num <= 0:
a__ : List[str] =f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] =[True] * (num + 1)
a__ : Union[str, Any] =[]
a__ : str =2
a__ : Any =int(math.sqrt(SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
a__ : Optional[int] =False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 148
| 0
|
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__lowerCAmelCase : int =logging.getLogger(__name__)
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = False
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if not self.initialized:
lowercase = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
lowercase = True
def A__ ( self ):
"""simple docstring"""
self.retriever.index.init_index()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.retriever._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return doc_ids, retrieved_doc_embeds
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(__lowerCAmelCase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
lowercase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for worker in self.retrieval_workers
] )
def A__ ( self ):
"""simple docstring"""
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowercase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
lowercase , lowercase = ray.get(random_worker.retrieve.remote(__lowerCAmelCase , __lowerCAmelCase ) )
else:
lowercase , lowercase = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
return super(__lowerCAmelCase , cls ).get_tokenizers(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = kwargs.pop("""config""" , __lowerCAmelCase ) or RagConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowercase = RagTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
lowercase = rag_tokenizer.question_encoder
lowercase = rag_tokenizer.generator
if indexed_dataset is not None:
lowercase = """custom"""
lowercase = CustomHFIndex(config.retrieval_vector_size , __lowerCAmelCase )
else:
lowercase = cls._build_index(__lowerCAmelCase )
return cls(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , retrieval_workers=__lowerCAmelCase , index=__lowerCAmelCase , )
| 197
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 357
|
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : str = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Tuple = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Optional[int] = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
_lowerCAmelCase : List[Any] = txa_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
_lowerCAmelCase : List[str] = txa_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
_lowerCAmelCase : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 126
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A :
__snake_case = 42
# setable values
__snake_case = 42
__snake_case = 42
__snake_case = None
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return cls(common=UpperCamelCase__, init_noise_sigma=UpperCamelCase__, timesteps=UpperCamelCase__ )
@dataclass
class A ( __UpperCAmelCase ):
__snake_case = 42
class A ( __UpperCAmelCase , __UpperCAmelCase ):
__snake_case = [e.name for e in FlaxKarrasDiffusionSchedulers]
__snake_case = 42
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self, UpperCamelCase__ = 1000, UpperCamelCase__ = 0.0_001, UpperCamelCase__ = 0.02, UpperCamelCase__ = "linear", UpperCamelCase__ = None, UpperCamelCase__ = "fixed_small", UpperCamelCase__ = True, UpperCamelCase__ = "epsilon", UpperCamelCase__ = jnp.floataa, ):
"""simple docstring"""
lowerCAmelCase_ = dtype
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase_ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase_ = jnp.array(1.0, dtype=self.dtype )
lowerCAmelCase_ = jnp.arange(0, self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase__, init_noise_sigma=UpperCamelCase__, timesteps=UpperCamelCase__, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None ):
"""simple docstring"""
return sample
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = () ):
"""simple docstring"""
lowerCAmelCase_ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase_ = (jnp.arange(0, UpperCamelCase__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase__, timesteps=UpperCamelCase__, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=None, UpperCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase_ = state.common.alphas_cumprod[t]
lowerCAmelCase_ = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase_ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase_ = jnp.clip(UpperCamelCase__, a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase_ = jnp.log(jnp.clip(UpperCamelCase__, a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase_ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase_ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase_ = variance
lowerCAmelCase_ = state.common.betas[t]
lowerCAmelCase_ = (predicted_variance + 1) / 2
lowerCAmelCase_ = frac * max_log + (1 - frac) * min_log
return variance
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = True, ):
"""simple docstring"""
lowerCAmelCase_ = timestep
if key is None:
lowerCAmelCase_ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase_ = jnp.split(UpperCamelCase__, sample.shape[1], axis=1 )
else:
lowerCAmelCase_ = None
# 1. compute alphas, betas
lowerCAmelCase_ = state.common.alphas_cumprod[t]
lowerCAmelCase_ = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase_ = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase_ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase_ = jnp.clip(UpperCamelCase__, -1, 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase_ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase_ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase_ = jax.random.split(UpperCamelCase__, num=1 )
lowerCAmelCase_ = jax.random.normal(UpperCamelCase__, shape=model_output.shape, dtype=self.dtype )
return (self._get_variance(UpperCamelCase__, UpperCamelCase__, predicted_variance=UpperCamelCase__ ) ** 0.5) * noise
lowerCAmelCase_ = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype ) )
lowerCAmelCase_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase__, state=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, ):
"""simple docstring"""
return add_noise_common(state.common, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, ):
"""simple docstring"""
return get_velocity_common(state.common, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 278
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
__snake_case : str = AutoTokenizer.from_pretrained("google/mt5-small" )
__snake_case : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
__snake_case : int = tokenizer("Hi I am" , return_tensors="np" ).input_ids
__snake_case : Tuple = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__snake_case : Tuple = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
__snake_case : str = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
__snake_case : Any = -(labels.shape[-1] * loss.item())
__snake_case : List[str] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 326
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A = 25_00_04
__A = 25_00_20
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __lowercase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = MBartTokenizer
UpperCamelCase_ : Any = MBartTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : int = True
def UpperCamelCase_ ( self : Optional[int] ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case = MBartTokenizer(A__ , keep_accents=A__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[Any] ) -> Tuple:
_snake_case = MBartTokenizer(A__ , keep_accents=A__ )
_snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_snake_case = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_snake_case = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase_ ( self : int ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case = self.tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(A__ )
_snake_case = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(A__ )
_snake_case = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=True
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
_snake_case = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(A__ )
_snake_case = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=False
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
_snake_case = tokenizer_p.save_pretrained(A__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(A__ )
_snake_case = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
UpperCamelCase_ : List[Any] = "facebook/mbart-large-en-ro"
UpperCamelCase_ : str = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
UpperCamelCase_ : List[str] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
UpperCamelCase_ : str = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def UpperCamelCase_ ( cls : Optional[int] ) -> List[str]:
_snake_case = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_snake_case = 1
return cls
def UpperCamelCase_ ( self : Union[str, Any] ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def UpperCamelCase_ ( self : Tuple ) -> List[str]:
_snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A__ )
def UpperCamelCase_ ( self : Optional[Any] ) -> List[Any]:
self.assertIn(A__ , self.tokenizer.all_special_ids )
_snake_case = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_snake_case = self.tokenizer.decode(A__ , skip_special_tokens=A__ )
_snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A__ )
self.assertEqual(A__ , A__ )
self.assertNotIn(self.tokenizer.eos_token , A__ )
def UpperCamelCase_ ( self : List[Any] ) -> List[str]:
_snake_case = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , A__ )
_snake_case = 10
_snake_case = self.tokenizer(A__ , max_length=A__ , truncation=A__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A__ )
self.assertEqual(len(A__ ) , A__ )
def UpperCamelCase_ ( self : int ) -> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def UpperCamelCase_ ( self : Optional[int] ) -> str:
_snake_case = tempfile.mkdtemp()
_snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A__ )
_snake_case = MBartTokenizer.from_pretrained(A__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A__ )
@require_torch
def UpperCamelCase_ ( self : Tuple ) -> Union[str, Any]:
_snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A__ , return_tensors='''pt''' )
_snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase_ ( self : Tuple ) -> int:
_snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(A__ , A__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase_ ( self : List[str] ) -> str:
_snake_case = self.tokenizer(self.src_text , padding=A__ , truncation=A__ , max_length=3 , return_tensors='''pt''' )
_snake_case = self.tokenizer(
text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=10 , return_tensors='''pt''' )
_snake_case = targets['''input_ids''']
_snake_case = shift_tokens_right(A__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase_ ( self : List[Any] ) -> int:
_snake_case = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(A__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 278
|
from ..utils import DummyObject, requires_backends
class lowercase_ ( metaclass=__lowercase ):
UpperCamelCase_ : Optional[int] = ["speech"]
def __init__( self : str , *A__ : List[str] , **A__ : Tuple ) -> Optional[Any]:
requires_backends(self , ['''speech'''] )
class lowercase_ ( metaclass=__lowercase ):
UpperCamelCase_ : Optional[Any] = ["speech"]
def __init__( self : Dict , *A__ : int , **A__ : int ) -> Tuple:
requires_backends(self , ['''speech'''] )
| 278
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[Any] = """pix2struct_text_model"""
__lowercase: List[str] = ["""past_key_values"""]
__lowercase: Dict = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , UpperCAmelCase_ : Optional[int]=50_244 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Union[str, Any]=64 , UpperCAmelCase_ : Any=2_048 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Optional[int]=128 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=1E-6 , UpperCAmelCase_ : int=1.0 , UpperCAmelCase_ : str="gelu_new" , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : int=True , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = d_kv
snake_case_ = d_ff
snake_case_ = num_layers
snake_case_ = num_heads
snake_case_ = relative_attention_num_buckets
snake_case_ = relative_attention_max_distance
snake_case_ = dropout_rate
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_factor
snake_case_ = use_cache
snake_case_ = eos_token_id
snake_case_ = decoder_start_token_id
# for backwards compatibility
snake_case_ = dense_act_fn
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Union[str, Any] ) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(UpperCAmelCase_ )
snake_case_ , snake_case_ = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
snake_case_ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = """pix2struct_vision_model"""
def __init__( self : Optional[int] , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : List[Any]=2_048 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : List[str]="gelu_new" , UpperCAmelCase_ : Any=1E-6 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Optional[Any]=1E-10 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : str=4_096 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[str]=128 , **UpperCAmelCase_ : List[Any] , ) ->str:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
snake_case_ = hidden_size
snake_case_ = patch_embed_hidden_size
snake_case_ = d_ff
snake_case_ = dropout_rate
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = initializer_range
snake_case_ = initializer_factor
snake_case_ = attention_dropout
snake_case_ = layer_norm_eps
snake_case_ = dense_act_fn
snake_case_ = seq_len
snake_case_ = relative_attention_num_buckets
snake_case_ = relative_attention_max_distance
snake_case_ = d_kv
@classmethod
def lowerCAmelCase ( cls : str , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[str] ) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(UpperCAmelCase_ )
snake_case_ , snake_case_ = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
snake_case_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = """pix2struct"""
__lowercase: Union[str, Any] = True
def __init__( self : Tuple , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=1.0 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=True , **UpperCAmelCase_ : Optional[Any] , ) ->Optional[int]:
"""simple docstring"""
super().__init__(tie_word_embeddings=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ )
if text_config is None:
snake_case_ = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
snake_case_ = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
snake_case_ = PixaStructTextConfig(**UpperCAmelCase_ )
snake_case_ = PixaStructVisionConfig(**UpperCAmelCase_ )
snake_case_ = self.text_config.decoder_start_token_id
snake_case_ = self.text_config.pad_token_id
snake_case_ = self.text_config.eos_token_id
snake_case_ = initializer_factor
snake_case_ = initializer_range
snake_case_ = self.initializer_range
snake_case_ = self.initializer_range
snake_case_ = is_vqa
@classmethod
def lowerCAmelCase ( cls : Tuple , UpperCAmelCase_ : PixaStructTextConfig , UpperCAmelCase_ : PixaStructVisionConfig , **UpperCAmelCase_ : Tuple ) ->Union[str, Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.text_config.to_dict()
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 347
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
return is_used
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 347
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
lowerCamelCase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
lowerCamelCase = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class _a ( _lowercase):
_a : Tuple = VOCAB_FILES_NAMES
_a : Dict = PRETRAINED_VOCAB_FILES_MAP
_a : Dict = PRETRAINED_INIT_CONFIGURATION
_a : Union[str, Any] = FunnelTokenizer
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = 2
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Any="<unk>" , _SCREAMING_SNAKE_CASE : Dict="<sep>" , _SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , _SCREAMING_SNAKE_CASE : str="<cls>" , _SCREAMING_SNAKE_CASE : List[str]="<mask>" , _SCREAMING_SNAKE_CASE : Optional[int]="<s>" , _SCREAMING_SNAKE_CASE : Dict="</s>" , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : str="##" , **_SCREAMING_SNAKE_CASE : List[str] , )-> List[str]:
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , clean_text=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , wordpieces_prefix=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
lowerCAmelCase__ : int = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : str = strip_accents
lowerCAmelCase__ : Dict = tokenize_chinese_chars
lowerCAmelCase__ : str = normalizer_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = do_lower_case
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None )-> Optional[int]:
lowerCAmelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : str = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
lowerCAmelCase__ : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 211
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "roformer"
def __init__( self , UpperCAmelCase=5_0000 , UpperCAmelCase=None , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1536 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = rotary_value
_UpperCAmelCase = use_cache
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 39
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 0
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[Any] = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = 50_257 , lowerCAmelCase__ = 1_024 , lowerCAmelCase__ = 768 , lowerCAmelCase__ = 12 , lowerCAmelCase__ = 12 , lowerCAmelCase__ = None , lowerCAmelCase__ = "gelu_new" , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 1E-5 , lowerCAmelCase__ = 0.0_2 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase__: Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
F' `n_embd`: {n_embd} are not equal.' )
lowercase__: List[str] = prefix_inner_dim
lowercase__: Optional[int] = prefix_hidden_dim
lowercase__: Optional[int] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowercase__: Any = (
nn.Linear(self.prefix_hidden_dim , lowerCAmelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowercase__: str = GPTaConfig(
vocab_size=lowerCAmelCase__ , n_positions=lowerCAmelCase__ , n_embd=lowerCAmelCase__ , n_layer=lowerCAmelCase__ , n_head=lowerCAmelCase__ , n_inner=lowerCAmelCase__ , activation_function=lowerCAmelCase__ , resid_pdrop=lowerCAmelCase__ , embd_pdrop=lowerCAmelCase__ , attn_pdrop=lowerCAmelCase__ , layer_norm_epsilon=lowerCAmelCase__ , initializer_range=lowerCAmelCase__ , scale_attn_weights=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , scale_attn_by_inverse_layer_idx=lowerCAmelCase__ , reorder_and_upcast_attn=lowerCAmelCase__ , )
lowercase__: str = GPTaLMHeadModel(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> List[str]:
'''simple docstring'''
lowercase__: List[Any] = self.transformer.transformer.wte(lowerCAmelCase__ )
lowercase__: Any = self.encode_prefix(lowerCAmelCase__ )
lowercase__: Union[str, Any] = self.decode_prefix(lowerCAmelCase__ )
lowercase__: Tuple = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowercase__: Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowercase__: Tuple = torch.cat((dummy_token, input_ids) , dim=1 )
lowercase__: Tuple = self.transformer(inputs_embeds=lowerCAmelCase__ , labels=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> torch.Tensor:
'''simple docstring'''
return torch.zeros(lowerCAmelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
return self.encode_prefix(lowerCAmelCase__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: Any = torch.split(lowerCAmelCase__ , 1 , dim=0 )
lowercase__: Optional[Any] = []
lowercase__: Union[str, Any] = []
for feature in features:
lowercase__: int = self.decode_prefix(feature.to(lowerCAmelCase__ ) ) # back to the clip feature
# Only support beam search for now
lowercase__: str = self.generate_beam(
input_embeds=lowerCAmelCase__ , device=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowercase__: Any = torch.stack(lowerCAmelCase__ )
lowercase__: Dict = torch.stack(lowerCAmelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = 5 , lowerCAmelCase__ = 67 , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = None , ) -> int:
'''simple docstring'''
lowercase__: Optional[Any] = eos_token_id
lowercase__: int = None
lowercase__: int = None
lowercase__: Optional[int] = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.int )
lowercase__: Any = torch.zeros(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.bool )
if input_embeds is not None:
lowercase__: Union[str, Any] = input_embeds
else:
lowercase__: Any = self.transformer.transformer.wte(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
lowercase__: Dict = self.transformer(inputs_embeds=lowerCAmelCase__ )
lowercase__: Tuple = outputs.logits
lowercase__: List[str] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowercase__: Union[str, Any] = logits.softmax(-1 ).log()
if scores is None:
lowercase__: List[Any] = logits.topk(lowerCAmelCase__ , -1 )
lowercase__: Optional[Any] = generated.expand(lowerCAmelCase__ , *generated.shape[1:] )
lowercase__: Any = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowercase__: Dict = next_tokens
else:
lowercase__: List[str] = tokens.expand(lowerCAmelCase__ , *tokens.shape[1:] )
lowercase__: Optional[Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowercase__: Optional[Any] = -float(np.inf )
lowercase__: Optional[Any] = 0
lowercase__: int = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowercase__: List[Any] = scores_sum / seq_lengths[:, None]
lowercase__: Union[str, Any] = scores_sum_average.view(-1 ).topk(lowerCAmelCase__ , -1 )
lowercase__: List[str] = next_tokens // scores_sum.shape[1]
lowercase__: List[str] = seq_lengths[next_tokens_source]
lowercase__: int = next_tokens % scores_sum.shape[1]
lowercase__: int = next_tokens.unsqueeze(1 )
lowercase__: List[str] = tokens[next_tokens_source]
lowercase__: Optional[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowercase__: Union[str, Any] = generated[next_tokens_source]
lowercase__: Tuple = scores_sum_average * seq_lengths
lowercase__: Optional[int] = is_stopped[next_tokens_source]
lowercase__: Any = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowercase__: List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowercase__: str = is_stopped + next_tokens.eq(lowerCAmelCase__ ).squeeze()
if is_stopped.all():
break
lowercase__: List[Any] = scores / seq_lengths
lowercase__: Optional[Any] = scores.argsort(descending=lowerCAmelCase__ )
# tokens tensors are already padded to max_seq_length
lowercase__: Any = [tokens[i] for i in order]
lowercase__: Union[str, Any] = torch.stack(lowerCAmelCase__ , dim=0 )
lowercase__: Union[str, Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 367
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __a ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> int:
'''simple docstring'''
super().__init__()
lowercase__: Union[str, Any] = pad_token_id
lowercase__: List[str] = max_length
lowercase__: int = vocab
lowercase__: List[Any] = merges
lowercase__: str = BytePairTokenizer(lowerCAmelCase__ , lowerCAmelCase__ , sequence_length=lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: Tuple = [' '.join(lowerCAmelCase__ ) for m in tokenizer.bpe_ranks.keys()]
lowercase__: List[Any] = tokenizer.get_vocab()
return cls(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__: int = GPTaTokenizer.from_pretrained(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
return cls.from_tokenizer(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
return cls(**lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = self.tf_tokenizer(lowerCAmelCase__ )
lowercase__: List[Any] = tf.ones_like(lowerCAmelCase__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowercase__: int = max_length if max_length is not None else self.max_length
if max_length is not None:
lowercase__ , lowercase__: List[Any] = pad_model_inputs(
lowerCAmelCase__ , max_seq_length=lowerCAmelCase__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 288
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__a = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__a = 50_003
__a = 50_002
@require_sentencepiece
@require_tokenizers
class A__ ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = PLBartTokenizer
UpperCamelCase_ : Union[str, Any] = None
UpperCamelCase_ : Tuple = False
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase : Tuple = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
_UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_UpperCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
_UpperCAmelCase : Any = tokenizer.vocab_size
_UpperCAmelCase : Dict = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )]
self.assertListEqual(__a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
_UpperCAmelCase : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
_UpperCAmelCase : Any = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = PLBartTokenizer(__a , language_codes="multi" , keep_accents=__a )
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_UpperCAmelCase : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase : Dict = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
_UpperCAmelCase : List[Any] = tokenizer.vocab_size
_UpperCAmelCase : List[Any] = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )]
self.assertListEqual(
__a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
_UpperCAmelCase : Optional[Any] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
_UpperCAmelCase : Dict = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = """uclanlp/plbart-python-en_XX"""
UpperCamelCase_ : List[str] = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
UpperCamelCase_ : Tuple = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
UpperCamelCase_ : Optional[int] = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def _lowerCAmelCase ( cls : Optional[int] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
_UpperCAmelCase : List[str] = 1
return cls
def _lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
self.assertIn(__a , self.tokenizer.all_special_ids )
_UpperCAmelCase : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
_UpperCAmelCase : Tuple = self.tokenizer.decode(__a , skip_special_tokens=__a )
_UpperCAmelCase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , __a )
_UpperCAmelCase : int = 1_0
_UpperCAmelCase : int = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[Any] = tempfile.mkdtemp()
_UpperCAmelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
_UpperCAmelCase : List[str] = PLBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
_UpperCAmelCase : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
_UpperCAmelCase : List[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
_UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
_UpperCAmelCase : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=1_0 , return_tensors="pt" )
_UpperCAmelCase : Optional[int] = targets["input_ids"]
_UpperCAmelCase : Dict = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 145
|
'''simple docstring'''
import math
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = input("Enter message: " )
UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) )
UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " )
if mode.lower().startswith("e" ):
UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ )
elif mode.lower().startswith("d" ):
UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f"""Output:\n{text + "|"}""" )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = [""] * key
for col in range(snake_case_ ):
UpperCAmelCase_ = col
while pointer < len(snake_case_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key )
UpperCAmelCase_ = key
UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ )
UpperCAmelCase_ = [""] * num_cols
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase_ = 0
row += 1
return "".join(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'efficientformer'
def __init__( self :int , _lowercase :List[int] = [3, 2, 6, 4] , _lowercase :List[int] = [48, 96, 2_24, 4_48] , _lowercase :List[bool] = [True, True, True, True] , _lowercase :int = 4_48 , _lowercase :int = 32 , _lowercase :int = 4 , _lowercase :int = 7 , _lowercase :int = 5 , _lowercase :int = 8 , _lowercase :int = 4 , _lowercase :float = 0.0 , _lowercase :int = 16 , _lowercase :int = 3 , _lowercase :int = 3 , _lowercase :int = 3 , _lowercase :int = 2 , _lowercase :int = 1 , _lowercase :float = 0.0 , _lowercase :int = 1 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :float = 1e-5 , _lowercase :str = "gelu" , _lowercase :float = 0.02 , _lowercase :float = 1e-12 , _lowercase :int = 2_24 , _lowercase :float = 1e-05 , **_lowercase :List[str] , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = hidden_sizes
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = depths
lowercase__ = mlp_expansion_ratio
lowercase__ = downsamples
lowercase__ = dim
lowercase__ = key_dim
lowercase__ = attention_ratio
lowercase__ = resolution
lowercase__ = pool_size
lowercase__ = downsample_patch_size
lowercase__ = downsample_stride
lowercase__ = downsample_pad
lowercase__ = drop_path_rate
lowercase__ = num_metaad_blocks
lowercase__ = distillation
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = image_size
lowercase__ = batch_norm_eps
| 364
|
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
if height >= 1:
move_tower(height - 1 , __magic_name__ , __magic_name__ , __magic_name__ )
move_disk(__magic_name__ , __magic_name__ )
move_tower(height - 1 , __magic_name__ , __magic_name__ , __magic_name__ )
def _A ( __magic_name__ , __magic_name__ ):
print("moving disk from" , __magic_name__ , "to" , __magic_name__ )
def _A ( ):
lowercase__ = int(input("Height of hanoi: " ).strip() )
move_tower(__magic_name__ , "A" , "B" , "C" )
if __name__ == "__main__":
main()
| 201
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
SCREAMING_SNAKE_CASE__ : Dict = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def A ( ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = _ask_options(
"In which compute environment are you running?" ,["This machine", "AWS (Amazon SageMaker)"] ,_convert_compute_environment ,)
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCamelCase : Union[str, Any] = get_sagemaker_input()
else:
lowerCamelCase : Tuple = get_cluster_input()
return config
def A ( _SCREAMING_SNAKE_CASE=None ) -> str:
if subparsers is not None:
lowerCamelCase : int = subparsers.add_parser("config" ,description=_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase : Optional[Any] = argparse.ArgumentParser("Accelerate config command" ,description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
"--config_file" ,default=_SCREAMING_SNAKE_CASE ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : int = get_user_input()
if args.config_file is not None:
lowerCamelCase : Optional[int] = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def A ( ) -> List[str]:
lowerCamelCase : int = config_command_parser()
lowerCamelCase : Tuple = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 48
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = 8
# DPR tok
lowerCAmelCase : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase : Dict = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase : str = {"unk_token": "<unk>"}
lowerCAmelCase : int = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase : int = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase : Dict = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def lowercase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowercase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = os.path.join(self.tmpdirname , "rag_tokenizer" )
lowerCAmelCase : List[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCAmelCase : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(snake_case__ )
rag_tokenizer.save_pretrained(snake_case__ )
lowerCAmelCase : List[str] = RagTokenizer.from_pretrained(snake_case__ , config=snake_case__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , snake_case__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , snake_case__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
lowerCAmelCase : Dict = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
lowerCAmelCase : List[str] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowerCAmelCase : str = tokenizer(snake_case__ )
self.assertIsNotNone(snake_case__ )
| 108
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase__)
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_UpperCamelCase:str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True})
_UpperCamelCase:ClassVar[Features] = Features({"text": Value("string")})
_UpperCamelCase:ClassVar[Features] = Features({"summary": Value("string")})
_UpperCamelCase:str = "text"
_UpperCamelCase:str = "summary"
@property
def _snake_case ( self )-> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A : Tuple = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ['GLPNFeatureExtractor']
__A : Dict = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any]="<s>" , __UpperCAmelCase : Optional[int]="</s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="<s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : Optional[Any]="<pad>" , __UpperCAmelCase : str="<mask>" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Any , ) ->None:
"""simple docstring"""
a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
a = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a = 1
a = len(self.sp_model ) + self.fairseq_offset
a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ) ->Optional[int]:
"""simple docstring"""
a = self.__dict__.copy()
a = None
a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , __UpperCAmelCase : Tuple ) ->Optional[Any]:
"""simple docstring"""
a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a = [self.cls_token_id]
a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ) ->Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
a = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 0
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowercase (unittest.TestCase ):
@property
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.dummy_uncond_unet
__lowerCAmelCase : Any = PNDMScheduler()
__lowerCAmelCase : Dict = PNDMPipeline(unet=A_ , scheduler=A_ )
pndm.to(A_ )
pndm.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
__lowerCAmelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' ).images
__lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
__lowerCAmelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=A_ )[0]
__lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowercase (unittest.TestCase ):
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
__lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_ )
__lowerCAmelCase : int = PNDMScheduler()
__lowerCAmelCase : Any = PNDMPipeline(unet=A_ , scheduler=A_ )
pndm.to(A_ )
pndm.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Tuple = torch.manual_seed(0 )
__lowerCAmelCase : Any = pndm(generator=A_ , output_type='''numpy''' ).images
__lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 275
| 0
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[str] = LongformerTokenizer
__UpperCamelCase : Dict = True
__UpperCamelCase : List[str] = LongformerTokenizerFast
__UpperCamelCase : Optional[Any] = True
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCAmelCase = {'''unk_token''': '''<unk>'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def _snake_case (self , **__lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , **__lowercase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = '''lower newer'''
return input_text, output_text
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCAmelCase = tokenizer.tokenize(__lowercase ) # , add_prefix_space=True)
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowercase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowercase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = '''Encode this sequence.'''
__lowerCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowercase , __lowercase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowercase , __lowercase )
# Testing spaces after special tokens
__lowerCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase )} ) # mask token has a left space
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase )
__lowerCAmelCase = '''Encode <mask> sequence'''
__lowerCAmelCase = '''Encode <mask>sequence'''
__lowerCAmelCase = tokenizer.encode(__lowercase )
__lowerCAmelCase = encoded.index(__lowercase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokenizer.encode(__lowercase )
__lowerCAmelCase = encoded.index(__lowercase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowercase , __lowercase )
def _snake_case (self ):
pass
def _snake_case (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
__lowerCAmelCase = '''A, <mask> AllenNLP sentence.'''
__lowerCAmelCase = tokenizer_r.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase )
__lowerCAmelCase = tokenizer_p.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _snake_case (self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowercase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowercase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __lowercase )
def _snake_case (self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCAmelCase = F"""{text_of_1_token} {text_of_1_token}"""
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowercase ) + 1, len(__lowercase ) + 1 + len(__lowercase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowercase ) + 1, len(__lowercase ) + 1 + len(__lowercase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowercase ), len(__lowercase ) + 1 + len(__lowercase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowercase ), len(__lowercase ) + 1 + len(__lowercase )) , )
__lowerCAmelCase = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowercase ) + 1, 1 + len(__lowercase ) + 1 + len(__lowercase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowercase ), 1 + len(__lowercase ) + 1 + len(__lowercase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase )
__lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowercase ), 1 + len(__lowercase ) + 1 + len(__lowercase )) , )
| 368
|
'''simple docstring'''
# Imports
import numpy as np
class a__ :
"""simple docstring"""
def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase )
def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
if red is not None:
__lowerCAmelCase = red
if green is not None:
__lowerCAmelCase = green
if blue is not None:
__lowerCAmelCase = blue
if red_edge is not None:
__lowerCAmelCase = red_edge
if nir is not None:
__lowerCAmelCase = nir
return True
def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase )
__lowerCAmelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def _snake_case (self ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case (self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case (self ):
return self.nir * (self.red / (self.green**2))
def _snake_case (self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case (self ):
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case (self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case (self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case (self ):
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case (self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case (self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case (self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case (self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case (self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case (self ):
return (self.nir / self.green) - 1
def _snake_case (self ):
return (self.nir / self.redEdge) - 1
def _snake_case (self ):
return (self.red - self.blue) / self.red
def _snake_case (self ):
__lowerCAmelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case (self ):
return self.nir - self.green
def _snake_case (self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case (self ):
__lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case (self , __lowercase=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case (self , __lowercase=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case (self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case (self , __lowercase=None , __lowercase=None ):
return (self.nir - b) / (a * self.red)
def _snake_case (self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case (self ):
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case (self ):
return self.nir / self.red
def _snake_case (self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case (self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case (self ):
return self.green / (self.nir + self.red + self.green)
def _snake_case (self ):
return self.nir / (self.nir + self.red + self.green)
def _snake_case (self ):
return self.red / (self.nir + self.red + self.green)
def _snake_case (self ):
return (self.green - self.red) / (self.green + self.red)
def _snake_case (self ):
return (self.red - self.green) / (self.red + self.green)
def _snake_case (self ):
__lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case (self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case (self ):
return self.nir / self.red
def _snake_case (self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case (self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 9
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Optional[int] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE : Optional[int] = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "▁"
class _lowerCamelCase( _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : List[Any] = AlbertTokenizer
def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="[CLS]", lowerCamelCase="[SEP]", lowerCamelCase="<unk>", lowerCamelCase="[SEP]", lowerCamelCase="<pad>", lowerCamelCase="[CLS]", lowerCamelCase="[MASK]", **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = (
AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase, normalized=lowerCamelCase)
if isinstance(lowerCamelCase, lowerCamelCase)
else mask_token
)
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, remove_space=lowerCamelCase, keep_accents=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, **lowerCamelCase, )
_lowercase : List[Any] = do_lower_case
_lowercase : List[Any] = remove_space
_lowercase : Optional[int] = keep_accents
_lowercase : Optional[int] = vocab_file
_lowercase : List[Any] = False if not self.vocab_file else True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : str = [self.sep_token_id]
_lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : Dict = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Optional[Any] = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase):
copyfile(self.vocab_file, lowerCamelCase)
return (out_vocab_file,)
| 21
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE : str = "bart"
SCREAMING_SNAKE_CASE : Optional[int] = True
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> int:
if LOAD_DENSE_INDEX:
_lowercase : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
_lowercase : Union[str, Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
_lowercase : str = qar_model.eval()
else:
_lowercase , _lowercase : Any = (None, None)
if MODEL_TYPE == "bart":
_lowercase : Dict = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
_lowercase : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
_lowercase : Any = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
_lowercase : List[Any] = sas_model.eval()
else:
_lowercase , _lowercase : Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> str:
if LOAD_DENSE_INDEX:
_lowercase : Optional[Any] = faiss.StandardGpuResources()
_lowercase : Optional[int] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
_lowercase : Tuple = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
_lowercase : Any = faiss.IndexFlatIP(128 )
_lowercase : Union[str, Any] = faiss.index_cpu_to_gpu(lowerCamelCase_ , 1 , lowerCamelCase_ )
wikiaab_gpu_index_flat.add(lowerCamelCase_ ) # TODO fix for larger GPU
else:
_lowercase , _lowercase : Any = (None, None)
_lowercase : List[str] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> Any:
_lowercase : List[str] = datasets.load_dataset('eli5' , name='LFQA_reddit' )
_lowercase : Optional[Any] = elia['train_eli5']
_lowercase : Tuple = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
_lowercase : Union[str, Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowerCamelCase_ )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = load_indexes()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = load_models()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = load_train_data()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=10 ) -> List[str]:
_lowercase : Any = embed_questions_for_retrieval([question] , lowerCamelCase_ , lowerCamelCase_ )
_lowercase , _lowercase : List[str] = eli5_train_q_index.search(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[str] = [elia_train[int(lowerCamelCase_ )] for i in I[0]]
return nn_examples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict:
if source == "none":
_lowercase , _lowercase : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowercase , _lowercase : Dict = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
_lowercase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowercase : Union[str, Any] = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCamelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCamelCase_ : None),
} )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=64 , lowerCamelCase_=256 , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=0.95 , lowerCamelCase_=0.8 ) -> Dict:
with torch.no_grad():
_lowercase : str = qa_sas_generate(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , num_answers=1 , num_beams=lowerCamelCase_ , min_len=lowerCamelCase_ , max_len=lowerCamelCase_ , do_sample=lowerCamelCase_ , temp=lowerCamelCase_ , top_p=lowerCamelCase_ , top_k=lowerCamelCase_ , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : int = "wiki40b"
SCREAMING_SNAKE_CASE : int = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : List[str] = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : int = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : str = None
# start main text
SCREAMING_SNAKE_CASE : List[str] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
SCREAMING_SNAKE_CASE : str = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : List[str] = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : Optional[int] = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : Optional[Any] = support_list[:10]
SCREAMING_SNAKE_CASE : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : Optional[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : List[Any] = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Any = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : List[Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE : str = find_nearest_training(question)
SCREAMING_SNAKE_CASE : Any = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
SCREAMING_SNAKE_CASE : str = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 21
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowercase )
class _lowerCAmelCase ( _lowercase ):
'''simple docstring'''
a_ : str =field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a_ : Optional[int] =Features({"""text""": Value("""string""" )} )
a_ : int =Features({} )
a_ : Optional[int] ="""text"""
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return {self.text_column: "text"}
| 360
|
from __future__ import annotations
lowerCAmelCase_ = []
def lowerCamelCase_ ( lowerCAmelCase: list[list[int]] , lowerCAmelCase: int , lowerCAmelCase: int )-> bool:
for i in range(len(lowerCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(lowerCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowerCAmelCase , -1 , -1 ) , range(lowerCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowerCAmelCase , -1 , -1 ) , range(lowerCAmelCase , len(lowerCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def lowerCamelCase_ ( lowerCAmelCase: list[list[int]] , lowerCAmelCase: int )-> bool:
if row >= len(lowerCAmelCase ):
solution.append(lowerCAmelCase )
printboard(lowerCAmelCase )
print()
return True
for i in range(len(lowerCAmelCase ) ):
if is_safe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_snake_case : Dict = 1
solve(lowerCAmelCase , row + 1 )
_snake_case : str = 0
return False
def lowerCamelCase_ ( lowerCAmelCase: list[list[int]] )-> None:
for i in range(len(lowerCAmelCase ) ):
for j in range(len(lowerCAmelCase ) ):
if board[i][j] == 1:
print('Q' , end=' ' )
else:
print('.' , end=' ' )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase_ = 8
lowerCAmelCase_ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 260
| 0
|
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( a_ : int , a_ : Dict , a_ : Union[str, Any] , a_ : Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE :Tuple = []
__SCREAMING_SNAKE_CASE :int = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__SCREAMING_SNAKE_CASE :Optional[int] = result + left + right
return input_list
def __lowerCamelCase ( a_ : int ) -> Optional[int]:
if len(_lowercase ) <= 1:
return input_list
__SCREAMING_SNAKE_CASE :Optional[int] = list(_lowercase )
# iteration for two-way merging
__SCREAMING_SNAKE_CASE :Dict = 2
while p <= len(_lowercase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(_lowercase ) , _lowercase ):
__SCREAMING_SNAKE_CASE :List[Any] = i
__SCREAMING_SNAKE_CASE :Union[str, Any] = i + p - 1
__SCREAMING_SNAKE_CASE :Union[str, Any] = (low + high + 1) // 2
__SCREAMING_SNAKE_CASE :Optional[Any] = merge(_lowercase , _lowercase , _lowercase , _lowercase )
# final merge of last two parts
if p * 2 >= len(_lowercase ):
__SCREAMING_SNAKE_CASE :Any = i
__SCREAMING_SNAKE_CASE :Any = merge(_lowercase , 0 , _lowercase , len(_lowercase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowerCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowerCamelCase_ = []
else:
lowerCamelCase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 191
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """nllb-moe"""
UpperCamelCase_ = ["""past_key_values"""]
UpperCamelCase_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , UpperCamelCase__ : List[str]=12_8112 , UpperCamelCase__ : str=1024 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Union[str, Any]=4096 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : Any=0.05 , UpperCamelCase__ : Any=0.05 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]="relu" , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Any="float32" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=128 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=0.001 , UpperCamelCase__ : Optional[Any]=0.001 , UpperCamelCase__ : Optional[Any]="all" , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[Any]=1.0 , UpperCamelCase__ : str=0.2 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Tuple=False , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : int = d_model
SCREAMING_SNAKE_CASE : Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Dict = decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Any = attention_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE : List[Any] = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = init_std
SCREAMING_SNAKE_CASE : int = encoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE : List[str] = router_z_loss_coef
SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
SCREAMING_SNAKE_CASE : int = decoder_sparse_step
SCREAMING_SNAKE_CASE : Optional[int] = encoder_sparse_step
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : int = expert_capacity
SCREAMING_SNAKE_CASE : Any = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
SCREAMING_SNAKE_CASE : str = router_dtype
SCREAMING_SNAKE_CASE : List[Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : int = batch_prioritized_routing
SCREAMING_SNAKE_CASE : str = second_expert_policy
SCREAMING_SNAKE_CASE : Optional[Any] = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE : Optional[Any] = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE : Optional[int] = moe_token_dropout
SCREAMING_SNAKE_CASE : Optional[int] = output_router_logits
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
| 182
| 0
|
import torch
from torch import nn
class lowercase__ ( nn.Module):
def __init__( self : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = n_token
SCREAMING_SNAKE_CASE : str = d_embed
SCREAMING_SNAKE_CASE : Dict = d_proj
SCREAMING_SNAKE_CASE : str = cutoffs + [n_token]
SCREAMING_SNAKE_CASE : List[str] = [0] + self.cutoffs
SCREAMING_SNAKE_CASE : Union[str, Any] = div_val
SCREAMING_SNAKE_CASE : Optional[int] = self.cutoffs[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList()
SCREAMING_SNAKE_CASE : int = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) )
else:
self.out_projs.append(UpperCamelCase__ )
self.out_layers.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : List[str] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) )
self.out_layers.append(nn.Linear(UpperCamelCase__ , r_idx - l_idx ) )
SCREAMING_SNAKE_CASE : List[str] = keep_order
def __A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if proj is None:
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
SCREAMING_SNAKE_CASE : Any = nn.functional.linear(UpperCamelCase__ , proj.t().contiguous() )
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __A ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE : Any = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE : List[Any] = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE : Dict = hidden.view(-1 , hidden.size(-1 ) )
SCREAMING_SNAKE_CASE : str = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
SCREAMING_SNAKE_CASE : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE : Tuple = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = labels != -100
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE : Dict = (
-nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : int = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE : List[str] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE : str = self.out_layers[i].weight
SCREAMING_SNAKE_CASE : List[Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE : Dict = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE : str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Optional[Any] = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE : Tuple = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE : str = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE : List[str] = labels.index_select(0 , UpperCamelCase__ ) - l_idx
SCREAMING_SNAKE_CASE : str = head_logprob.index_select(0 , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = hidden.index_select(0 , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Any = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE : str = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE : Tuple = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE : List[Any] = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __A ( self : Optional[Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE : Optional[int] = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE : str = self.out_layers[i].weight
SCREAMING_SNAKE_CASE : Union[str, Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE : Any = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE : Dict = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE : int = logprob_i
return out
| 258
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A ( _lowercase , _lowercase ):
# Load checkpoint
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(_lowercase , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : List[str] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
SCREAMING_SNAKE_CASE : int = {}
for k, v in state_dict.items():
if "pred_layer" in k:
SCREAMING_SNAKE_CASE : Optional[Any] = v
else:
SCREAMING_SNAKE_CASE : List[Any] = v
SCREAMING_SNAKE_CASE : Dict = chkpt['''params''']
SCREAMING_SNAKE_CASE : Optional[Any] = {n: v for n, v in config.items() if not isinstance(_lowercase , (torch.FloatTensor, numpy.ndarray) )}
SCREAMING_SNAKE_CASE : Any = chkpt['''dico_word2id''']
SCREAMING_SNAKE_CASE : str = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
SCREAMING_SNAKE_CASE : Dict = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
SCREAMING_SNAKE_CASE : List[str] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
SCREAMING_SNAKE_CASE : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(_lowercase , _lowercase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowercase , indent=2 ) + '''\n''' )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 258
| 1
|
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 1000 ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = 1, 1
lowerCAmelCase__ : int = []
for i in range(1 , n + 1 ):
lowerCAmelCase__ : List[str] = prev_numerator + 2 * prev_denominator
lowerCAmelCase__ : List[Any] = prev_numerator + prev_denominator
if len(str(UpperCamelCase ) ) > len(str(UpperCamelCase ) ):
result.append(UpperCamelCase )
lowerCAmelCase__ : str = numerator
lowerCAmelCase__ : Union[str, Any] = denominator
return len(UpperCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 37
|
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int=False , UpperCamelCase__ : Tuple=False , ) -> Dict:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__magic_name__ = np.array([re.sub(UpperCamelCase__ , """""" , UpperCamelCase__ ) for x in predictions] )
__magic_name__ = np.array([re.sub(UpperCamelCase__ , """""" , UpperCamelCase__ ) for x in references] )
else:
__magic_name__ = np.asarray(UpperCamelCase__ )
__magic_name__ = np.asarray(UpperCamelCase__ )
if ignore_case:
__magic_name__ = np.char.lower(UpperCamelCase__ )
__magic_name__ = np.char.lower(UpperCamelCase__ )
if ignore_punctuation:
__magic_name__ = string.punctuation.maketrans("""""" , """""" , string.punctuation )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
if ignore_numbers:
__magic_name__ = string.digits.maketrans("""""" , """""" , string.digits )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
__magic_name__ = predictions == references
return {"exact_match": np.mean(UpperCamelCase__ ) * 100}
| 88
| 0
|
def _A ( UpperCamelCase_ : int, UpperCamelCase_ : int, UpperCamelCase_ : int) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
__lowercase = _modexpt(UpperCamelCase_, exponent // 2, UpperCamelCase_) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase_, exponent - 1, UpperCamelCase_)) % modulo_value
def _A ( UpperCamelCase_ : int = 1777, UpperCamelCase_ : int = 1855, UpperCamelCase_ : int = 8) -> int:
'''simple docstring'''
__lowercase = base
for _ in range(1, UpperCamelCase_):
__lowercase = _modexpt(UpperCamelCase_, UpperCamelCase_, 10**digits)
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 352
|
"""simple docstring"""
import numpy
# List of input, output pairs
_a = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_a = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
_a = [2, 4, 1, 5]
_a = len(train_data)
_a = 0.009
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : List[Any]="train") -> Optional[Any]:
'''simple docstring'''
return calculate_hypothesis_value(UpperCamelCase_, UpperCamelCase_) - output(
UpperCamelCase_, UpperCamelCase_)
def _A ( UpperCamelCase_ : List[Any]) -> Union[str, Any]:
'''simple docstring'''
__lowercase = 0
for i in range(len(UpperCamelCase_) - 1):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : Optional[int]) -> Dict:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : List[str]) -> int:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0])
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0])
return None
def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : Tuple=m) -> int:
'''simple docstring'''
__lowercase = 0
for i in range(UpperCamelCase_):
if index == -1:
summation_value += _error(UpperCamelCase_)
else:
summation_value += _error(UpperCamelCase_) * train_data[i][0][index]
return summation_value
def _A ( UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = summation_of_cost_derivative(UpperCamelCase_, UpperCamelCase_) / m
return cost_derivative_value
def _A ( ) -> List[str]:
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowercase = 0.000_002
__lowercase = 0
__lowercase = 0
while True:
j += 1
__lowercase = [0, 0, 0, 0]
for i in range(0, len(UpperCamelCase_)):
__lowercase = get_cost_derivative(i - 1)
__lowercase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCamelCase_, UpperCamelCase_, atol=UpperCamelCase_, rtol=UpperCamelCase_, ):
break
__lowercase = temp_parameter_vector
print(("Number of iterations:", j))
def _A ( ) -> int:
'''simple docstring'''
for i in range(len(UpperCamelCase_)):
print(("Actual output value:", output(UpperCamelCase_, "test")))
print(("Hypothesis output:", calculate_hypothesis_value(UpperCamelCase_, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 144
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( A : Optional[Any]=None ) -> Union[str, Any]:
if subparsers is not None:
UpperCAmelCase_ : int = subparsers.add_parser('''test''' )
else:
UpperCAmelCase_ : int = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=A , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=A )
return parser
def __UpperCAmelCase ( A : Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
UpperCAmelCase_ : Optional[int] = script_name
else:
UpperCAmelCase_ : int = F"--config_file={args.config_file} {script_name}"
UpperCAmelCase_ : Union[str, Any] = ["""accelerate-launch"""] + test_args.split()
UpperCAmelCase_ : Optional[int] = execute_subprocess_async(A , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCAmelCase_ : str = test_command_parser()
UpperCAmelCase_ : Dict = parser.parse_args()
test_command(A )
if __name__ == "__main__":
main()
| 304
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
| 287
| 0
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
|
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 28
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 1
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase_ = {'''facebook/blenderbot_small-90M''': 5_12}
def __lowercase ( __lowercase ) -> List[str]:
'''simple docstring'''
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
_A = set(__lowercase )
return pairs
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : str="__start__" , __UpperCAmelCase : Dict="__end__" , __UpperCAmelCase : str="__unk__" , __UpperCAmelCase : List[str]="__null__" , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
super().__init__(unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , **__UpperCAmelCase )
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
_A = json.load(__UpperCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
_A = merges_handle.read().split("\n" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = {}
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_A = re.sub("([.,!?()])" , R" \1" , __UpperCAmelCase )
_A = re.sub("(')" , R" \1 " , __UpperCAmelCase )
_A = re.sub(R"\s{2,}" , " " , __UpperCAmelCase )
if "\n" in token:
_A = token.replace("\n" , " __newln__" )
_A = token.split(" " )
_A = []
for token in tokens:
if not len(__UpperCAmelCase ):
continue
_A = token.lower()
_A = tuple(__UpperCAmelCase )
_A = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_A = get_pairs(__UpperCAmelCase )
if not pairs:
words.append(__UpperCAmelCase )
continue
while True:
_A = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__UpperCAmelCase ):
try:
_A = word.index(__UpperCAmelCase , __UpperCAmelCase )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__UpperCAmelCase )
_A = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
_A = get_pairs(__UpperCAmelCase )
_A = "@@ ".join(__UpperCAmelCase )
_A = word[:-4]
_A = word
words.append(__UpperCAmelCase )
return " ".join(__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = []
_A = re.findall(R"\S+\n?" , __UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
return split_tokens
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = token.lower()
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int ):
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = " ".join(__UpperCAmelCase ).replace("@@ " , "" ).strip()
return out_string
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
_A = 0
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_A = token_index
writer.write(" ".join(__UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
| 79
|
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __magic_name__ :
def __init__( self : str , lowercase_ : Dict ):
if isinstance(lowercase_ , lowercase_ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowercase_ : List[Any] = deepcopy(lowercase_ )
elif os.path.exists(lowercase_ ):
with io.open(lowercase_ , """r""" , encoding="""utf-8""" ) as f:
lowercase_ : Union[str, Any] = json.load(lowercase_ )
else:
try:
lowercase_ : int = baseaa.urlsafe_baadecode(lowercase_ ).decode("""utf-8""" )
lowercase_ : str = json.loads(lowercase_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
lowercase_ : Any = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowercase_ : Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowercase_ : str = False
if self.is_zeroa() or self.is_zeroa():
lowercase_ : Dict = set(["""cpu""", """nvme"""] )
lowercase_ : List[Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowercase_ : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Any ):
lowercase_ : Optional[Any] = self.config
# find the config node of interest if it exists
lowercase_ : Tuple = ds_key_long.split(""".""" )
lowercase_ : Union[str, Any] = nodes.pop()
for node in nodes:
lowercase_ : List[str] = config.get(lowercase_ )
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : List[str]=None ):
lowercase_ , lowercase_ : List[Any] = self.find_config_node(lowercase_ )
if config is None:
return default
return config.get(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : int=False ):
lowercase_ : int = self.config
# find the config node of interest if it exists
lowercase_ : Dict = ds_key_long.split(""".""" )
for node in nodes:
lowercase_ : List[Any] = config
lowercase_ : Dict = config.get(lowercase_ )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple ):
lowercase_ : str = self.get_value(lowercase_ )
return False if value is None else bool(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Union[str, Any] = self.get_value(lowercase_ )
return False if value is None else not bool(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return self._offload
class __magic_name__ :
def __init__( self : Any , lowercase_ : Union[str, Any] ):
lowercase_ : Any = engine
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , **lowercase_ : str ):
# runs backpropagation and handles mixed precision
self.engine.backward(lowercase_ , **lowercase_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[Any] , lowercase_ : Tuple ):
super().__init__(lowercase_ , device_placement=lowercase_ , scaler=lowercase_ )
lowercase_ : Any = hasattr(self.optimizer , """overflow""" )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Tuple=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple ):
super().__init__(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __magic_name__ :
def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]=0.0_01 , lowercase_ : List[str]=0 , **lowercase_ : List[Any] ):
lowercase_ : str = params
lowercase_ : List[Any] = lr
lowercase_ : int = weight_decay
lowercase_ : Union[str, Any] = kwargs
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str]=None , lowercase_ : int=0 , **lowercase_ : int ):
lowercase_ : Union[str, Any] = optimizer
lowercase_ : List[str] = total_num_steps
lowercase_ : Dict = warmup_num_steps
lowercase_ : Dict = kwargs
| 239
| 0
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __A ( a_ :int) -> bool:
__a : int = int(number**0.5)
return number == sq * sq
def __A ( a_ :int , a_ :int , a_ :int , a_ :int , a_ :int , a_ :int) -> tuple[int, int]:
__a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__a : int = x_den * y_den * z_den
__a : int = gcd(a_ , a_)
top //= hcf
bottom //= hcf
return top, bottom
def __A ( a_ :int = 35) -> int:
__a : set = set()
__a : int
__a : Fraction = Fraction(0)
__a : tuple[int, int]
for x_num in range(1 , order + 1):
for x_den in range(x_num + 1 , order + 1):
for y_num in range(1 , order + 1):
for y_den in range(y_num + 1 , order + 1):
# n=1
__a : List[str] = x_num * y_den + x_den * y_num
__a : List[Any] = x_den * y_den
__a : str = gcd(a_ , a_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : Optional[Any] = add_three(
a_ , a_ , a_ , a_ , a_ , a_)
unique_s.add(a_)
# n=2
__a : Optional[int] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__a : List[str] = x_den * x_den * y_den * y_den
if is_sq(a_) and is_sq(a_):
__a : int = int(sqrt(a_))
__a : int = int(sqrt(a_))
__a : int = gcd(a_ , a_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : Dict = add_three(
a_ , a_ , a_ , a_ , a_ , a_)
unique_s.add(a_)
# n=-1
__a : Any = x_num * y_num
__a : List[str] = x_den * y_num + x_num * y_den
__a : str = gcd(a_ , a_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : int = add_three(
a_ , a_ , a_ , a_ , a_ , a_)
unique_s.add(a_)
# n=2
__a : Optional[Any] = x_num * x_num * y_num * y_num
__a : Optional[int] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(a_) and is_sq(a_):
__a : Union[str, Any] = int(sqrt(a_))
__a : Optional[Any] = int(sqrt(a_))
__a : Any = gcd(a_ , a_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : Dict = add_three(
a_ , a_ , a_ , a_ , a_ , a_)
unique_s.add(a_)
for num, den in unique_s:
total += Fraction(a_ , a_)
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 188
|
"""simple docstring"""
import os
import string
import sys
A = 1 << 8
A = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
A = KEYMAP['''up''']
A = KEYMAP['''left''']
if sys.platform == "win32":
A = []
A = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
A = ord(str(i))
def __A ( ) -> Dict:
if os.name == "nt":
import msvcrt
__a : Optional[Any] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(a_) == 0:
# Read the keystroke
__a : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__a : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__a : Union[str, Any] = chr(WIN_KEYMAP[cha])
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
WIN_CH_BUFFER.append(a_)
if ord(a_) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26))
__a : str = chr(KEYMAP['''esc'''])
except KeyError:
__a : str = cha[1]
else:
__a : Optional[Any] = ch.decode(a_)
else:
__a : Union[str, Any] = WIN_CH_BUFFER.pop(0)
elif os.name == "posix":
import termios
import tty
__a : Any = sys.stdin.fileno()
__a : List[str] = termios.tcgetattr(a_)
try:
tty.setraw(a_)
__a : int = sys.stdin.read(1)
finally:
termios.tcsetattr(a_ , termios.TCSADRAIN , a_)
return ch
def __A ( ) -> str:
__a : Any = get_raw_chars()
if ord(a_) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(a_) == KEYMAP["esc"]:
__a : str = get_raw_chars()
if ord(a_) == KEYMAP["mod_int"]:
__a : List[str] = get_raw_chars()
if ord(a_) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(a_) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(a_) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 188
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowercase_ = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowercase_ = "UperNetConfig"
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 0 , _a = False , _a = 1 , ):
super().__init__()
__a = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , padding=_a , bias=_a , dilation=_a , )
__a = nn.BatchNormad(_a )
__a = nn.ReLU()
def __UpperCAmelCase ( self , _a ):
__a = self.conv(_a )
__a = self.batch_norm(_a )
__a = self.activation(_a )
return output
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a ):
super().__init__()
__a = [
nn.AdaptiveAvgPoolad(_a ),
UperNetConvModule(_a , _a , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_a ) , _a )
def __UpperCAmelCase ( self , _a ):
__a = input
for layer in self.layers:
__a = layer(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a ):
super().__init__()
__a = pool_scales
__a = align_corners
__a = in_channels
__a = channels
__a = []
for i, pool_scale in enumerate(_a ):
__a = UperNetPyramidPoolingBlock(pool_scale=_a , in_channels=_a , channels=_a )
self.blocks.append(_a )
self.add_module(str(_a ) , _a )
def __UpperCAmelCase ( self , _a ):
__a = []
for ppm in self.blocks:
__a = ppm(_a )
__a = nn.functional.interpolate(
_a , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(_a )
return ppm_outs
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = config
__a = config.pool_scales # e.g. (1, 2, 3, 6)
__a = in_channels
__a = config.hidden_size
__a = False
__a = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__a = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__a = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__a = nn.ModuleList()
__a = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__a = UperNetConvModule(_a , self.channels , kernel_size=1 )
__a = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_a )
self.fpn_convs.append(_a )
__a = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __UpperCAmelCase ( self ):
self.apply(self._init_weights )
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self , _a ):
__a = inputs[-1]
__a = [x]
psp_outs.extend(self.psp_modules(_a ) )
__a = torch.cat(_a , dim=1 )
__a = self.bottleneck(_a )
return output
def __UpperCAmelCase ( self , _a ):
# build laterals
__a = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_a ) )
# build top-down path
__a = len(_a )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__a = laterals[i - 1].shape[2:]
__a = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_a , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
__a = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__a = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
__a = torch.cat(_a , dim=1 )
__a = self.fpn_bottleneck(_a )
__a = self.classifier(_a )
return output
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a = 2 , _a = 3 , _a = 1 ):
super().__init__()
__a = config
__a = config.auxiliary_in_channels
__a = config.auxiliary_channels
__a = config.auxiliary_num_convs
__a = config.auxiliary_concat_input
__a = in_index
__a = (kernel_size // 2) * dilation
__a = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_a , padding=_a , dilation=_a ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_a , padding=_a , dilation=_a ) )
if self.num_convs == 0:
__a = nn.Identity()
else:
__a = nn.Sequential(*_a )
if self.concat_input:
__a = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_a , padding=kernel_size // 2 )
__a = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __UpperCAmelCase ( self ):
self.apply(self._init_weights )
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self , _a ):
# just take the relevant feature maps
__a = encoder_hidden_states[self.in_index]
__a = self.convs(_a )
if self.concat_input:
__a = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__a = self.classifier(_a )
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = UperNetConfig
__UpperCAmelCase : Union[str, Any] = 'pixel_values'
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __UpperCAmelCase ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __UpperCAmelCase ( self , _a , _a=False ):
if isinstance(_a , _a ):
__a = value
lowercase_ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__(_a )
__a = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__a = UperNetHead(_a , in_channels=self.backbone.channels )
__a = UperNetFCNHead(_a ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_a , config_class=_CONFIG_FOR_DOC )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , _a = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = output_attentions if output_attentions is not None else self.config.output_attentions
__a = self.backbone.forward_with_filtered_kwargs(
_a , output_hidden_states=_a , output_attentions=_a )
__a = outputs.feature_maps
__a = self.decode_head(_a )
__a = nn.functional.interpolate(_a , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_a )
__a = None
if self.auxiliary_head is not None:
__a = self.auxiliary_head(_a )
__a = nn.functional.interpolate(
_a , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_a )
__a = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
__a = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__a = loss_fct(_a , _a )
__a = loss_fct(_a , _a )
__a = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
__a = (logits,) + outputs[1:]
else:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 45
|
def __lowerCAmelCase ( a__ ) -> str:
__a = []
__a = set({'''(''', '''[''', '''{'''} )
__a = set({''')''', ''']''', '''}'''} )
__a = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(a__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(a__ ) == 0 or (len(a__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(a__ ) == 0
def __lowerCAmelCase ( ) -> Dict:
__a = input('''Enter sequence of brackets: ''' )
if is_balanced(a__ ):
print(a__ , '''is balanced''' )
else:
print(a__ , '''is not balanced''' )
if __name__ == "__main__":
main()
| 6
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : List[str] = {}
lowercase__ : Union[str, Any] = tokenizer(example['''content'''] , truncation=__lowerCamelCase )['''input_ids''']
lowercase__ : Union[str, Any] = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
lowerCAmelCase_ = HfArgumentParser(PretokenizationArguments)
lowerCAmelCase_ = parser.parse_args()
if args.num_workers is None:
lowerCAmelCase_ = multiprocessing.cpu_count()
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
lowerCAmelCase_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 302
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Any ,_snake_case : UNetaDModel ,_snake_case : UNetaDModel ,_snake_case : DDPMScheduler ,_snake_case : Any ,) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = value_function
lowercase__ : Optional[int] = unet
lowercase__ : Tuple = scheduler
lowercase__ : Dict = env
lowercase__ : int = env.get_dataset()
lowercase__ : Dict = {}
for key in self.data.keys():
try:
lowercase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ : List[Any] = {}
for key in self.data.keys():
try:
lowercase__ : str = self.data[key].std()
except: # noqa: E722
pass
lowercase__ : Tuple = env.observation_space.shape[0]
lowercase__ : Optional[int] = env.action_space.shape[0]
def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self : Dict ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if type(_snake_case ) is dict:
return {k: self.to_torch(_snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(_snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(_snake_case ,device=self.unet.device )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
lowercase__ : List[Any] = val.clone()
return x_in
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : int ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = x.shape[0]
lowercase__ : Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ : Dict = torch.full((batch_size,) ,_snake_case ,device=self.unet.device ,dtype=torch.long )
for _ in range(_snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ : int = self.value_function(x.permute(0 ,2 ,1 ) ,_snake_case ).sample
lowercase__ : Optional[Any] = torch.autograd.grad([y.sum()] ,[x] )[0]
lowercase__ : List[str] = self.scheduler._get_variance(_snake_case )
lowercase__ : Union[str, Any] = torch.exp(0.5 * posterior_variance )
lowercase__ : Optional[int] = model_std * grad
lowercase__ : Optional[Any] = 0
lowercase__ : str = x.detach()
lowercase__ : Dict = x + scale * grad
lowercase__ : str = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,_snake_case ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
lowercase__ : Dict = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,predict_epsilon=_snake_case )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase__ : Dict = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.to_torch(_snake_case )
return x, y
def __call__( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple=64 ,_snake_case : Any=32 ,_snake_case : Optional[Any]=2 ,_snake_case : str=0.1 ) -> List[Any]:
"""simple docstring"""
lowercase__ : Any = self.normalize(_snake_case ,'''observations''' )
lowercase__ : Tuple = obs[None].repeat(_snake_case ,axis=0 )
lowercase__ : Dict = {0: self.to_torch(_snake_case )}
lowercase__ : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ : Optional[int] = randn_tensor(_snake_case ,device=self.unet.device )
lowercase__ : Tuple = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : str = self.to_torch(_snake_case )
# run the diffusion process
lowercase__ , lowercase__ : int = self.run_diffusion(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# sort output trajectories by value
lowercase__ : Optional[Any] = y.argsort(0 ,descending=_snake_case ).squeeze()
lowercase__ : str = x[sorted_idx]
lowercase__ : str = sorted_values[:, :, : self.action_dim]
lowercase__ : Optional[int] = actions.detach().cpu().numpy()
lowercase__ : List[str] = self.de_normalize(_snake_case ,key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ : str = np.random.randint(0 ,_snake_case )
lowercase__ : int = denorm_actions[selected_index, 0]
return denorm_actions
| 302
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.