code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _snake_case (__SCREAMING_SNAKE_CASE): def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,): super().__init__( _snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,) UpperCAmelCase_ : Tuple = field UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths} UpperCAmelCase_ : Optional[int] = Json( cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,) def UpperCamelCase__ ( self ): # Build iterable dataset if self.streaming: UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : int = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : int = None self.builder.download_and_prepare( download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,) UpperCAmelCase_ : Dict = self.builder.as_dataset( split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory ) return dataset class _snake_case : def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) UpperCAmelCase_ 
: int = dataset UpperCAmelCase_ : Union[str, Any] = path_or_buf UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE UpperCAmelCase_ : Dict = num_proc UpperCAmelCase_ : Optional[Any] = "utf-8" UpperCAmelCase_ : Optional[int] = to_json_kwargs def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case ) UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" ) UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False ) UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True ) UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' ) if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer: UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs ) else: if compression: raise NotImplementedError( f'''The compression parameter is not supported when writing to a buffer, but compression={compression}''' " was passed. Please provide a local path instead." 
) UpperCAmelCase_ : Union[str, Any] = self._write( file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs ) return written def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args UpperCAmelCase_ : List[str] = query_table( table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,) UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json( path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case ) if not json_str.endswith("\n" ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,): UpperCAmelCase_ : Optional[Any] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,): UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_snake_case ) else: UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,): written += file_obj.write(_snake_case ) return written
71
'''simple docstring''' from numpy import exp, pi, sqrt def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> int: """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
71
1
'''simple docstring''' import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } _lowerCamelCase = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } _lowerCamelCase = { """jukebox""": 512, } class _snake_case (__SCREAMING_SNAKE_CASE): __A : int =VOCAB_FILES_NAMES __A : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __A : Any =PRETRAINED_LYRIC_TOKENS_SIZES __A : Optional[Any] =["input_ids", "attention_mask"] def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=["v3", "v2", "v2"] ,_snake_case=5_12 ,_snake_case=5 ,_snake_case="<|endoftext|>" ,**_snake_case ,): UpperCAmelCase_ : Union[str, Any] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else unk_token super().__init__( unk_token=_snake_case ,n_genres=_snake_case ,version=_snake_case ,max_n_lyric_tokens=_snake_case ,**_snake_case ,) UpperCAmelCase_ : Union[str, Any] = version UpperCAmelCase_ : List[str] = max_n_lyric_tokens UpperCAmelCase_ : Optional[Any] = n_genres with open(_snake_case ,encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : int = json.load(_snake_case ) with open(_snake_case ,encoding="utf-8" ) as 
vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(_snake_case ) with open(_snake_case ,encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : List[str] = json.load(_snake_case ) UpperCAmelCase_ : Union[str, Any] = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 79: UpperCAmelCase_ : str = oov.replace(R"\-'" ,R"\-+'" ) UpperCAmelCase_ : List[Any] = regex.compile(_snake_case ) UpperCAmelCase_ : Dict = {v: k for k, v in self.artists_encoder.items()} UpperCAmelCase_ : int = {v: k for k, v in self.genres_encoder.items()} UpperCAmelCase_ : str = {v: k for k, v in self.lyrics_encoder.items()} @property def UpperCamelCase__ ( self ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def UpperCamelCase__ ( self ): return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : str = [self.artists_encoder.get(_snake_case ,0 ) for artist in list_artists] for genres in range(len(_snake_case ) ): UpperCAmelCase_ : List[Any] = [self.genres_encoder.get(_snake_case ,0 ) for genre in list_genres[genres]] UpperCAmelCase_ : Optional[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) UpperCAmelCase_ : List[Any] = [[self.lyrics_encoder.get(_snake_case ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def UpperCamelCase__ ( self ,_snake_case ): return list(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_for_tokenization(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : str = self._tokenize(_snake_case ) return artist, genre, lyrics def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = False ): for idx 
in range(len(self.version ) ): if self.version[idx] == "v3": UpperCAmelCase_ : Optional[int] = artists[idx].lower() UpperCAmelCase_ : Union[str, Any] = [genres[idx].lower()] else: UpperCAmelCase_ : Optional[Any] = self._normalize(artists[idx] ) + ".v2" UpperCAmelCase_ : Optional[Any] = [ self._normalize(_snake_case ) + ".v2" for genre in genres[idx].split("_" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": UpperCAmelCase_ : Union[str, Any] = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" ) UpperCAmelCase_ : Tuple = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" UpperCAmelCase_ : Tuple = {vocab[index]: index + 1 for index in range(len(_snake_case ) )} UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Any = len(_snake_case ) + 1 UpperCAmelCase_ : Any = self.vocab UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in self.vocab.items()} UpperCAmelCase_ : Union[str, Any] = "" else: UpperCAmelCase_ : Optional[Any] = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" ) UpperCAmelCase_ : str = self._run_strip_accents(_snake_case ) UpperCAmelCase_ : int = lyrics.replace("\\" ,"\n" ) UpperCAmelCase_ : List[Any] = self.out_of_vocab.sub("" ,_snake_case ), [], [] return artists, genres, lyrics def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Any = unicodedata.normalize("NFD" ,_snake_case ) UpperCAmelCase_ : str = [] for char in text: UpperCAmelCase_ : List[str] = unicodedata.category(_snake_case ) if cat == "Mn": continue output.append(_snake_case ) return "".join(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Optional[Any] = ( [chr(_snake_case ) for i in range(ord("a" ) ,ord("z" ) + 1 )] + [chr(_snake_case ) for i in range(ord("A" ) ,ord("Z" ) + 1 )] + [chr(_snake_case ) for i in range(ord("0" ) ,ord("9" ) + 1 )] + ["."] ) UpperCAmelCase_ : Optional[Any] = frozenset(_snake_case ) UpperCAmelCase_ : List[Any] = re.compile(R"_+" ) UpperCAmelCase_ : int = 
"".join([c if c in accepted else "_" for c in text.lower()] ) UpperCAmelCase_ : int = pattern.sub("_" ,_snake_case ).strip("_" ) return text def UpperCamelCase__ ( self ,_snake_case ): return " ".join(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ): # Convert to TensorType if not isinstance(_snake_case ,_snake_case ): UpperCAmelCase_ : str = TensorType(_snake_case ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf UpperCAmelCase_ : Tuple = tf.constant UpperCAmelCase_ : Optional[int] = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." ) import torch UpperCAmelCase_ : Tuple = torch.tensor UpperCAmelCase_ : str = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." ) import jax.numpy as jnp # noqa: F811 UpperCAmelCase_ : List[str] = jnp.array UpperCAmelCase_ : Optional[int] = _is_jax else: UpperCAmelCase_ : Optional[Any] = np.asarray UpperCAmelCase_ : str = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: UpperCAmelCase_ : Union[str, Any] = [inputs] if not is_tensor(_snake_case ): UpperCAmelCase_ : Any = as_tensor(_snake_case ) except: # noqa E722 raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length." 
) return inputs def __call__( self ,_snake_case ,_snake_case ,_snake_case="" ,_snake_case="pt" ): UpperCAmelCase_ : Optional[int] = [0, 0, 0] UpperCAmelCase_ : List[Any] = [artist] * len(self.version ) UpperCAmelCase_ : List[Any] = [genres] * len(self.version ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = self.tokenize(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._convert_token_to_id(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = [-INFINITY] * len(full_tokens[-1] ) UpperCAmelCase_ : Optional[int] = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=_snake_case ) for i in range(len(self.version ) ) ] return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): if not os.path.isdir(_snake_case ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Dict = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] ) with open(_snake_case ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=_snake_case ) ) UpperCAmelCase_ : Any = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] ) with open(_snake_case ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=_snake_case ) ) UpperCAmelCase_ : int = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] ) with open(_snake_case ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=_snake_case ) ) return (artists_file, genres_file, lyrics_file) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Tuple = self.artists_decoder.get(_snake_case 
) UpperCAmelCase_ : Union[str, Any] = [self.genres_decoder.get(_snake_case ) for genre in genres_index] UpperCAmelCase_ : str = [self.lyrics_decoder.get(_snake_case ) for character in lyric_index] return artist, genres, lyrics
71
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class _snake_case (nn.Module): def __init__( self ,_snake_case = 16 ,_snake_case = 88 ,_snake_case = None ,_snake_case = 1 ,_snake_case = 0.0 ,_snake_case = 32 ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = "geglu" ,_snake_case = None ,): super().__init__() UpperCAmelCase_ : Optional[Any] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=_snake_case ,attention_head_dim=_snake_case ,in_channels=_snake_case ,num_layers=_snake_case ,dropout=_snake_case ,norm_num_groups=_snake_case ,cross_attention_dim=_snake_case ,attention_bias=_snake_case ,sample_size=_snake_case ,num_vector_embeds=_snake_case ,activation_fn=_snake_case ,num_embeds_ada_norm=_snake_case ,) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCAmelCase_ : List[str] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCAmelCase_ : int = [77, 2_57] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCAmelCase_ : List[Any] = [1, 0] def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = True ,): UpperCAmelCase_ : List[str] = hidden_states UpperCAmelCase_ : str = [] UpperCAmelCase_ : Optional[int] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCAmelCase_ : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCAmelCase_ : Any = self.transformer_index_for_condition[i] UpperCAmelCase_ : int = self.transformers[transformer_index]( _snake_case ,encoder_hidden_states=_snake_case ,timestep=_snake_case ,cross_attention_kwargs=_snake_case ,return_dict=_snake_case ,)[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCAmelCase_ : List[Any] = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=_snake_case )
71
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer _lowerCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast _lowerCamelCase = TaTokenizerFast _lowerCamelCase = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): 
raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys _lowerCamelCase = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
71
'''simple docstring''' import json import sys def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f: UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[Any] = results[benchmark_name] UpperCAmelCase_ : Any = benchmark_name.split("/" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) UpperCAmelCase_ : Any = "| metric |" UpperCAmelCase_ : Any = "|--------|" UpperCAmelCase_ : Union[str, Any] = "| new / old (diff) |" for metric_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = benchmark_res[metric_name] UpperCAmelCase_ : Union[str, Any] = metric_vals["new"] UpperCAmelCase_ : Optional[Any] = metric_vals.get("old" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = F''' {new_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": _lowerCamelCase = sys.argv[1] _lowerCamelCase = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
71
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Any =AltDiffusionPipeline __A : List[str] =TEXT_TO_IMAGE_PARAMS __A : Optional[int] =TEXT_TO_IMAGE_BATCH_PARAMS __A : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS __A : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase__ ( self ): torch.manual_seed(0 ) UpperCAmelCase_ : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) UpperCAmelCase_ : int = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = 
RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) UpperCAmelCase_ : str = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_02 ,) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case ) UpperCAmelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) UpperCAmelCase_ : List[Any] = 77 UpperCAmelCase_ : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ): if str(_snake_case ).startswith("mps" ): UpperCAmelCase_ : int = torch.manual_seed(_snake_case ) else: UpperCAmelCase_ : Dict = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) UpperCAmelCase_ : int = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase__ ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : List[Any] = self.get_dummy_components() torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=50_02 ,) # TODO: remove after fixing the 
non-deterministic text encoder UpperCAmelCase_ : Any = RobertaSeriesModelWithTransformation(_snake_case ) UpperCAmelCase_ : int = text_encoder UpperCAmelCase_ : int = AltDiffusionPipeline(**_snake_case ) UpperCAmelCase_ : Dict = alt_pipe.to(_snake_case ) alt_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : Any = "A photo of an astronaut" UpperCAmelCase_ : Tuple = alt_pipe(**_snake_case ) UpperCAmelCase_ : Union[str, Any] = output.images UpperCAmelCase_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : int = np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.get_dummy_components() UpperCAmelCase_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=_snake_case ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=50_02 ,) # TODO: remove after fixing the non-deterministic text encoder UpperCAmelCase_ : Union[str, Any] = RobertaSeriesModelWithTransformation(_snake_case ) UpperCAmelCase_ : Tuple = text_encoder UpperCAmelCase_ : List[str] = AltDiffusionPipeline(**_snake_case ) UpperCAmelCase_ : int = alt_pipe.to(_snake_case ) alt_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Dict = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : List[Any] = alt_pipe(**_snake_case ) UpperCAmelCase_ : Optional[Any] = output.images UpperCAmelCase_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Optional[int] = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 
0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _snake_case (unittest.TestCase): def UpperCamelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Optional[Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" ,safety_checker=_snake_case ) UpperCAmelCase_ : str = alt_pipe.to(_snake_case ) alt_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : str = "A painting of a squirrel eating a burger" UpperCAmelCase_ : Any = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = alt_pipe([prompt] ,generator=_snake_case ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="np" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" ,subfolder="scheduler" ) UpperCAmelCase_ : Union[str, Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" ,scheduler=_snake_case ,safety_checker=_snake_case ) UpperCAmelCase_ : int = alt_pipe.to(_snake_case ) alt_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : List[Any] = "A painting of a squirrel eating a burger" UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Dict = alt_pipe([prompt] ,generator=_snake_case ,num_inference_steps=2 ,output_type="numpy" ) UpperCAmelCase_ : int = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = 
np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
71
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[int] =DebertaVaTokenizer __A : Union[str, Any] =DebertaVaTokenizerFast __A : str =True __A : List[str] =True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = "this is a test" UpperCAmelCase_ : Optional[Any] = "this is a test" return input_text, output_text def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = "<pad>" UpperCAmelCase_ : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"[PAD]" ) self.assertEqual(len(_snake_case ) ,3_00_01 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = "This is a test" UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89] UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case 
,_snake_case ) # fmt: off UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,) @slow def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 
4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
71
1
'''simple docstring'''
import re


# NOTE: the original defined every function as ``a__`` (each definition
# shadowing the previous one) while the bodies called ``split_input``,
# ``to_simple_case`` and ``to_complex_case`` -- names that never existed --
# and the split helper referenced an undefined ``str_``.  Every call raised
# NameError.  Restoring the names the code itself uses is the minimal fix
# that makes the module runnable; behavior is otherwise unchanged.


def split_input(str_: str) -> list:
    """Split *str_* on punctuation, then split each segment into words.

    Returns a list of word lists, one inner list per punctuation-free segment.
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Concatenate every word of *str_* capitalized (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of *text* with *separator*, upper- or lower-cased.

    Returns ``"not valid string"`` for inputs that cannot be processed,
    matching the original error-reporting convention.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """``hello world`` -> ``HelloWorld``."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """``hello world`` -> ``helloWorld``."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        # empty input yields an empty simple-case string; indexing it raises
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """``hello world`` -> ``hello_world`` (or ``HELLO_WORLD`` when *upper*)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """``hello world`` -> ``hello-world`` (or ``HELLO-WORLD`` when *upper*)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
71
'''simple docstring'''


def a__(number: int) -> int:
    """Return the number of bits needed to represent *number* (its bit length).

    >>> a__(0)
    0
    >>> a__(5)
    3

    Raises:
        TypeError: if *number* is not an ``int``.
        ValueError: if *number* is negative.  Python's right shift is
            arithmetic (``-1 >> 1 == -1``), so the shift loop below would
            never terminate for negative inputs -- the original hung forever.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        # Guard against the infinite loop described in the docstring.
        raise ValueError("Input value must be a non-negative integer")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
'''simple docstring'''


# NOTE: the original collapsed every function name to ``a__`` and every local
# to ``UpperCAmelCase_`` while the bodies referenced ``count_inversions_bf``,
# ``count_inversions_recursive``, ``_count_cross_inversions``, ``p``, ``q``,
# ``arr_a`` and ``main`` -- none of which existed, so every call raised
# NameError.  The names are restored below; the algorithms are unchanged.


def count_inversions_bf(arr):
    """Count inversions in *arr* by brute force.

    An inversion is a pair (i, j) with i < j and arr[i] > arr[j].  O(n^2).
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort.

    Returns ``(sorted_arr, num_inversions)``.  O(n log n).
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge sorted lists *p* and *q*, counting cross pairs p[i] > q[j].

    Returns ``(merged_sorted_list, num_cross_inversions)``.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # p is sorted, so if p[i] > q[j] then every remaining element of
            # p also exceeds q[j]; all of those pairs are inversions.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    # Exactly one of the two lists has leftovers; append them.
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Sanity-check both counters against known answers."""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
71
'''simple docstring'''
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return C(n, k): the number of k-element subsets of an n-element set.

    The function was originally named ``a__`` while the ``__main__`` block
    below called ``combinations`` -- a NameError when run as a script; the
    name is restored to the one the module actually uses.

    Raises:
        ValueError: if ``n < k`` or ``k < 0``.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # Integer division is exact here: k! * (n-k)! always divides n!.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
71
1
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = checkpoint UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Tuple = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ : Union[str, Any] = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ : Dict = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ : Tuple = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ : Dict = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ : List[str] = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ : Optional[int] = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ : int = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ : int = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ : Dict = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ : int = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ : Tuple = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ : Any = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ : Tuple = len({".".join(layer.split("." 
)[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ : List[str] = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_SCREAMING_SNAKE_CASE ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ : int = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ : Optional[Any] = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_SCREAMING_SNAKE_CASE ) } for i in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Any = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCAmelCase_ : Dict = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) UpperCAmelCase_ : Any = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = {"old": F'''down.{i}.block''', "new": F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ : Dict = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ : List[str] = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] UpperCAmelCase_ : List[Any] = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = [key for key in vae_state_dict if "encoder.mid.attn" in key] 
UpperCAmelCase_ : str = renew_vae_attention_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) conv_attn_to_linear(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Any = num_up_blocks - 1 - i UpperCAmelCase_ : Any = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCAmelCase_ : Any = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCAmelCase_ : Dict = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = {"old": F'''up.{block_id}.block''', "new": F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ : Union[str, Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ : Dict = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] UpperCAmelCase_ : str = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ : Any = renew_vae_attention_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = {"old": "mid.attn_1", "new": 
"mid_block.attentions.0"} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) conv_attn_to_linear(_SCREAMING_SNAKE_CASE ) return new_checkpoint def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , ) -> int: """simple docstring""" UpperCAmelCase_ : int = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ : Optional[Any] = io.BytesIO(r.content ) UpperCAmelCase_ : Union[str, Any] = OmegaConf.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = 5_12 UpperCAmelCase_ : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ : List[Any] = {} with safe_open(_SCREAMING_SNAKE_CASE , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ : Any = f.get_tensor(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )["state_dict"] # Convert the VAE model. UpperCAmelCase_ : List[Any] = create_vae_diffusers_config(_SCREAMING_SNAKE_CASE , image_size=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = custom_convert_ldm_vae_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = AutoencoderKL(**_SCREAMING_SNAKE_CASE ) vae.load_state_dict(_SCREAMING_SNAKE_CASE ) vae.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") _lowerCamelCase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
71
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] =VideoToVideoSDPipeline __A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"} __A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"} __A : str =PipelineTesterMixin.required_optional_params - {"latents"} __A : Dict =False # No `output_type`. 
__A : Optional[int] =frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ]) def UpperCamelCase__ ( self ): torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,) UpperCAmelCase_ : int = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) UpperCAmelCase_ : Dict = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,) torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ : Optional[int] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ): # 3 frames UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case ) if str(_snake_case ).startswith("mps" ): UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case 
).manual_seed(_snake_case ) UpperCAmelCase_ : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.get_dummy_components() UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case ) UpperCAmelCase_ : int = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : str = "np" UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,) def UpperCamelCase__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." 
) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _snake_case (unittest.TestCase): def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case ) UpperCAmelCase_ : List[Any] = video.to("cuda" ) UpperCAmelCase_ : List[Any] = "Spiderman is surfing" UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
71
1
'''simple docstring'''
import argparse
import shutil
from pathlib import Path

try:
    from tqdm import tqdm
except ImportError:  # pragma: no cover - the progress bar is cosmetic
    # Fall back to a pass-through so the packing logic still works when
    # tqdm is not installed.
    def tqdm(iterable, *args, **kwargs):
        return iterable


# NOTE: the original defined every function as ``a__`` while calling
# ``pack_examples``, ``pack_data_dir`` and ``packer_cli`` -- names that never
# existed, so the script raised NameError.  The intended names are restored.


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate consecutive (src, tgt) pairs into packed examples.

    Pairs are merged with a single space as long as neither the packed source
    nor the packed target exceeds *max_tokens* according to *tok* (a tokenizer
    called as ``tok(text, return_tensors="pt")``).

    Returns:
        ``(packed_src, packed_tgt)`` -- two parallel lists of strings.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # Token count is the sequence length of the tokenized text.
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):
            # can't fit: finalize the current packed example and start fresh
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:
            # can fit: keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the example still being built
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split of *data_dir* into *save_path*; copy val/test as-is.

    File handles are now context-managed (the original leaked them via bare
    ``Path.open()`` calls).
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        with Path(src_path).open() as f:
            src_docs = [x.rstrip() for x in f.readlines()]
        with Path(tgt_path).open() as f:
            tgt_docs = [x.rstrip() for x in f.readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").write_text("\n".join(packed_src))
        Path(save_path / f"{split}.target").write_text("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    """Parse CLI arguments, load the tokenizer and run :func:`pack_data_dir`."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    # Imported lazily so importing this module for pack_examples() does not
    # require (or pay the startup cost of) transformers.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
71
"""Integration tests for the `datasets` inspection helpers
(`inspect_dataset`, `inspect_metric`, `get_dataset_config_*`,
`get_dataset_infos`, `get_dataset_split_names`).

These tests hit the Hugging Face Hub, hence the ``integration`` marker.
"""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

# Module-level pytest marker: every test here requires network access.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # A multi-config dataset with no config name must raise.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
71
1
"""Fine-tune seq2seq models (BART/T5/mBART/...) for summarization or
translation with PyTorch Lightning.

``SummarizationModule`` wraps a Hugging Face seq2seq model; ``TranslationModule``
specializes it for translation (BLEU instead of ROUGE). ``main`` wires up
logging, checkpointing and early stopping, then trains and optionally tests.
"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader

from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeqaSeqDataset,
    SeqaSeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    """Lightning module that fine-tunes a seq2seq model for summarization."""

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            # Lightning must not replace our custom sampler in DDP mode.
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # FSMT keeps separate src/tgt vocabularies; use the target one for the LM head.
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # Negative counts mean "use the whole split".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            # mBART decodes starting from the target-language code token.
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """Debugging utility: dump one batch as decoded text and raw token ids."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to whitespace-stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Compute the training/eval loss for one batch. Returns a 1-tuple (loss,)."""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate, decode, and score one eval batch."""
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    """Same training loop as SummarizationModule, scored with BLEU."""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    """Build (or reuse) a module, train it, and optionally run the test loop."""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
71
"""Unit tests for CLIPProcessor: checks that the processor round-trips through
save/load, and that its text/image outputs agree with the underlying
CLIPTokenizer(Fast) and CLIPImageProcessor."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        """Write a tiny BPE tokenizer and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
71
1
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """
    Configuration class for a Conditional DETR model.

    Stores the architecture hyper-parameters (encoder/decoder sizes, backbone
    choice, Hungarian-matcher costs and loss coefficients). Instantiating with
    the defaults yields a configuration similar to
    `microsoft/conditional-detr-resnet-50`.

    Raises:
        ValueError: if both `backbone_config` and `use_timm_backbone` are set —
            the two backbone mechanisms are mutually exclusive.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto DETR's naming.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the nested backbone config object from its serialized dict form.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias required by `attribute_map`.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by `attribute_map`.
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
71
"""Tests for the AudioLDM text-to-audio pipeline: fast dummy-component tests and slow pretrained-checkpoint tests."""
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests built from tiny randomly-initialized components."""

    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build the dict of tiny pipeline components (unet, scheduler, vae, text encoder, tokenizer, vocoder)."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard call kwargs for the fast tests; generator is device-dependent."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        """A text prompt and its pre-computed embeddings must produce the same audio."""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        """Negative prompts passed as text or as embeddings must match."""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    """Integration tests against the pretrained `cvssp/audioldm` checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Call kwargs with fixed latents so outputs are reproducible across runs."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
71
1
"""Pure-Python implementation of the MD5 message-digest algorithm (RFC 1321).

All multi-byte quantities are handled in little-endian order via ASCII
bit-strings, mirroring the reference description of the algorithm.
"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit-string from big-endian to little-endian byte order.

    >>> to_little_endian(b"1" * 32)
    b'11111111111111111111111111111111'
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    # Reassemble the four 8-bit bytes in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Render the low 32 bits of *i* as little-endian hexadecimal.

    >>> reformat_hex(1234)
    b'd2040000'
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    # Emit the four hex byte-pairs in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad *message* (as an ASCII bit-string) to a multiple of 512 bits.

    Appends a single '1' bit, '0' bits up to 448 (mod 512), then the original
    bit-length as a little-endian 64-bit value.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of *bit_string* as a list of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    """Bitwise NOT of *i* within 32 bits.

    >>> not_aa(0)
    4294967295
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value *i* left by *shift* bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of *message* as lowercase hex bytes.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    """
    bit_string = preprocess(message)

    # Per-round additive constants: floor(2**32 * |sin(i + 1)|).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase = { """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
71
1
"""Consistency checker for `__init__.py` files that use a lazy `_import_structure`.

Verifies that the objects declared in `_import_structure` match the objects
imported under `if TYPE_CHECKING`, and that every submodule is registered.
"""
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend name guarded by an `if not is_xxx_available()` line, or None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse an init file; return (import_dict_objects, type_hint_objects) keyed by backend.

    Returns None when the file is a traditional init without `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init; return a list of human-readable error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise ValueError listing every inconsistent init."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of top-level submodule names found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise ValueError if a submodule on disk is missing from the main `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
71
"""Greedy APX algorithm for the minimum vertex cover problem."""

import heapq


def a__(_SCREAMING_SNAKE_CASE: dict) -> set[int]:
    """Return an approximate minimum vertex cover of the given graph.

    Greedy APX algorithm: repeatedly pick the vertex with the highest
    remaining degree, add it to the cover, and delete its incident edges,
    until no uncovered edge remains.

    Args:
        _SCREAMING_SNAKE_CASE: adjacency-list representation of an undirected
            graph, mapping each vertex to the list of its neighbours.
            NOTE: the neighbour lists are modified in place.

    Returns:
        The set of vertices chosen as the cover.
    """
    # NOTE: the original (scrambled) code pushed onto the input dict and read
    # undefined names (`argmax`, `index`); restored here to the intended logic.
    graph = _SCREAMING_SNAKE_CASE
    # Max-priority queue emulated with heapq's min-heap: store -degree so the
    # highest-degree vertex surfaces first.  Entries are mutable *lists*
    # because their rank slot is updated in place as edges are removed.
    queue: list[list] = []
    for key, value in graph.items():  # O(log n) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of vertices picked for the cover so far
    chosen_vertices = set()

    # queue[0][0] is the (negated) rank of the current max-degree vertex;
    # stop once no vertex has an uncovered incident edge left.
    while queue and queue[0][0] != 0:
        # Extract the vertex with the maximum remaining degree.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all edges incident to argmax and update the other ranks.
        for elem in queue:
            # A vertex with rank 0 has no uncovered edges left — skip it.
            if elem[0] == 0:
                continue
            # If argmax is adjacent to this vertex, drop the edge and bump
            # the rank one step towards 0 (ranks are negative degrees).
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Ranks were mutated in place, so the heap invariant must be rebuilt.
        heapq.heapify(queue)
    return chosen_vertices


# Backward-compatible alias: the __main__ section (and the original public
# API) refer to the function by its descriptive name.
greedy_min_vertex_cover = a__


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fixed NameError: the example graph was bound to `_lowerCamelCase` while
    # the print below read `graph`.
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
71
1
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = {"""vocab_file""": """spiece.model"""} _lowerCamelCase = { """vocab_file""": { """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""", } } class _snake_case (__SCREAMING_SNAKE_CASE): def __init__( self ,_snake_case ,_snake_case=False ,_snake_case=True ,_snake_case=False ,_snake_case="<s>" ,_snake_case="</s>" ,_snake_case="<unk>" ,_snake_case="<sep>" ,_snake_case="<pad>" ,_snake_case="<cls>" ,_snake_case="<mask>" ,_snake_case=["<eop>", "<eod>"] ,_snake_case = None ,**_snake_case ,): UpperCAmelCase_ : List[str] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token UpperCAmelCase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case ,remove_space=_snake_case ,keep_accents=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,additional_special_tokens=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) UpperCAmelCase_ : Dict = 3 UpperCAmelCase_ : Union[str, Any] = do_lower_case UpperCAmelCase_ : Any = remove_space UpperCAmelCase_ : Optional[Any] = keep_accents UpperCAmelCase_ : Optional[Any] = vocab_file UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) UpperCAmelCase_ : List[str] = jieba UpperCAmelCase_ : Any = str.maketrans(" \n" ,"\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def UpperCamelCase__ ( self ): return len(self.sp_model ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): UpperCAmelCase_ : List[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[int] = None return state def __setstate__( self ,_snake_case ): UpperCAmelCase_ : Union[str, Any] = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self ,_snake_case ): if self.remove_space: UpperCAmelCase_ : Union[str, Any] = " ".join(inputs.strip().split() ) else: UpperCAmelCase_ : str = inputs UpperCAmelCase_ : List[str] = outputs.replace("``" ,"\"" ).replace("''" ,"\"" ) if not self.keep_accents: UpperCAmelCase_ : int = unicodedata.normalize("NFKD" ,_snake_case ) UpperCAmelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(_snake_case )] ) if self.do_lower_case: UpperCAmelCase_ : Dict = outputs.lower() return outputs def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Any = self.preprocess_text(_snake_case ) UpperCAmelCase_ : Optional[int] = self.sp_model.encode(_snake_case ,out_type=_snake_case ) UpperCAmelCase_ : List[Any] = [] for piece in pieces: if len(_snake_case ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): UpperCAmelCase_ : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case ,"" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase_ : Any = cur_pieces[1:] else: UpperCAmelCase_ : int = 
cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_snake_case ) else: new_pieces.append(_snake_case ) return new_pieces def UpperCamelCase__ ( self ,_snake_case ): return self.sp_model.PieceToId(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ): return self.sp_model.IdToPiece(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Dict = "".join(_snake_case ).replace(_snake_case ," " ).strip() return out_string def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): UpperCAmelCase_ : Dict = [self.sep_token_id] UpperCAmelCase_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) if token_ids_a is not None: return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1] return ([0] * len(_snake_case )) + [1, 1] def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): UpperCAmelCase_ : List[Any] = [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): if not os.path.isdir(_snake_case ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Optional[int] = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,"wb" ) as fi: 
UpperCAmelCase_ : int = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,) def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ): UpperCAmelCase_ : Tuple = super()._decode(*_snake_case ,**_snake_case ) UpperCAmelCase_ : List[str] = text.replace(" " ,"" ).replace("\u2582" ," " ).replace("\u2583" ,"\n" ) return text
71
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _lowerCamelCase = logging.get_logger(__name__) def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_SCREAMING_SNAKE_CASE ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class _snake_case (__SCREAMING_SNAKE_CASE): __A : Tuple =["pixel_values"] def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,): super().__init__(**_snake_case ) UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56} UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case ) UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" ) UpperCAmelCase_ : int = do_resize UpperCAmelCase_ : List[str] = size 
UpperCAmelCase_ : Dict = do_center_crop UpperCAmelCase_ : Optional[Any] = crop_size UpperCAmelCase_ : Optional[Any] = resample UpperCAmelCase_ : int = do_rescale UpperCAmelCase_ : Optional[int] = rescale_factor UpperCAmelCase_ : Dict = offset UpperCAmelCase_ : Optional[Any] = do_normalize UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,): UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case ) if "shortest_edge" in size: UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case ) elif "height" in size and "width" in size: UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,): UpperCAmelCase_ : Dict = get_size_dict(_snake_case ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,): UpperCAmelCase_ : int = image.astype(np.floataa ) if offset: UpperCAmelCase_ : Any = image - (scale / 2) return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,): return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,): if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case ) if do_resize: UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) if do_center_crop: UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case ) if do_rescale: UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case ) if do_normalize: UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case ) return image def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,): UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : str = resample if resample is not None else self.resample UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std UpperCAmelCase_ : Dict = size if size is not None else self.size UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case ) UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : int = get_size_dict(_snake_case 
,param_name="crop_size" ) if not valid_images(_snake_case ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) UpperCAmelCase_ : Any = make_batched(_snake_case ) UpperCAmelCase_ : Dict = [ [ self._preprocess_image( image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,) for img in video ] for video in videos ] UpperCAmelCase_ : List[str] = {"pixel_values": videos} return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
71
1
"""Render a benchmark-comparison JSON results file as collapsible Markdown."""

import json
import sys


def a__(input_json_file: str, output_md_file: str) -> None:
    """Convert benchmark results from JSON into a Markdown summary.

    Fixed: the scrambled original declared *both* parameters with the same
    name (a SyntaxError); they are restored to two distinct parameters.

    Args:
        input_json_file: path to a JSON file mapping benchmark names to
            ``{metric_name: {"new": x, "old": y, "diff": z}}`` entries
            ("old" and "diff" are optional).
        output_md_file: path of the Markdown file to write (overwritten).
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        # Only the file name, not the full path, goes into the section title.
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"""### Benchmark: {benchmark_file_name}""")

        # Build a one-row table: metric names as the header, then the
        # "new / old (diff)" values underneath.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            # Non-numeric values are rendered as the literal string "None".
            val_str = f""" {new_val:f}""" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f""" / {old_val:f}""" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f""" ({dif_val:f})""" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


# Backward-compatible alias matching the name used by the CLI entry point.
format_json_to_md = a__


if __name__ == "__main__":
    # Fixed NameErrors: both argv values were assigned to `_lowerCamelCase`
    # while the call below read `input_json_file` / `output_md_file`.
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
71
"""Read datasets from, and write datasets to, JSON / JSON-Lines files."""

import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


# NOTE(review): this module looks mechanically identifier-scrambled: every
# parameter is named `_snake_case` (duplicate parameter names are a
# SyntaxError), every assignment target is `UpperCAmelCase_`, yet later
# statements read the *intended* names (`field`, `dataset`, `orient`, ...).
# Both classes share the name `_snake_case`, all methods share the name
# `UpperCamelCase__` (so later defs shadow earlier ones), and the reader's
# base class `__SCREAMING_SNAKE_CASE` is undefined here — presumably it was
# `AbstractDatasetReader`; confirm against the original `datasets` source.
# The code below is left byte-identical; only comments were added.
class _snake_case (__SCREAMING_SNAKE_CASE):
    # JSON dataset reader: wraps the packaged `Json` builder and exposes a
    # `read()`-style entry point returning a (streaming or map-style) Dataset.
    def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
        # Intended parameters (from the base reader): path_or_paths, split,
        # features, cache_dir, keep_in_memory, streaming, field, num_proc.
        super().__init__(
            _snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
        # Optional top-level JSON key containing the records.
        UpperCAmelCase_ : Tuple = field
        # Normalize a single path into a {split: path} mapping.
        UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
        # The packaged JSON dataset builder that does the actual parsing.
        UpperCAmelCase_ : Optional[int] = Json(
            cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)

    def UpperCamelCase__ ( self ):
        # Intended name: `read`. Returns either an IterableDataset (streaming)
        # or a fully prepared map-style Dataset.
        # Build iterable dataset
        if self.streaming:
            UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            # download_config / download_mode / verification_mode / base_path
            # are all left at their defaults (None).
            UpperCAmelCase_ : Union[str, Any] = None
            UpperCAmelCase_ : int = None
            UpperCAmelCase_ : List[Any] = None
            UpperCAmelCase_ : int = None
            self.builder.download_and_prepare(
                download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
            UpperCAmelCase_ : Dict = self.builder.as_dataset(
                split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
        return dataset


class _snake_case :
    # JSON dataset writer: serializes a Dataset to JSON-Lines (default) via
    # pandas `to_json`, batch by batch, optionally with multiprocessing.
    def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
        # Intended parameters: dataset, path_or_buf, batch_size, num_proc,
        # **to_json_kwargs (forwarded to pandas DataFrame.to_json).
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        UpperCAmelCase_ : int = dataset
        UpperCAmelCase_ : Union[str, Any] = path_or_buf
        # Fall back to the library-wide default batch size.
        UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        UpperCAmelCase_ : Dict = num_proc
        # Output encoding used when turning the JSON text into bytes.
        UpperCAmelCase_ : Optional[Any] = "utf-8"
        UpperCAmelCase_ : Optional[int] = to_json_kwargs

    def UpperCamelCase__ ( self ):
        # Intended name: `write`. Resolves pandas-to_json options, opens the
        # destination (path or buffer), and returns the number of bytes written.
        UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
        UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
        # JSON-Lines output only makes sense for orient="records".
        UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
        # pandas forbids index=True for orient in {"split", "table"}.
        UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
        UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
        if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
            # Destination is a path: let fsspec handle (remote) IO+compression.
            with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
                UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
        else:
            # Destination is an already-open buffer: compression unsupported.
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    " was passed. Please provide a local path instead."
                )
            UpperCAmelCase_ : Union[str, Any] = self._write(
                file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
        return written

    def UpperCamelCase__ ( self ,_snake_case ):
        # Intended name: `_batch_json`. Serializes one slice of the dataset
        # (args = (offset, orient, lines, index, to_json_kwargs)) and returns
        # the encoded bytes, guaranteeing a trailing newline per batch.
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
        UpperCAmelCase_ : List[str] = query_table(
            table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
        UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
            path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
        # Intended name: `_write`. Streams every batch to `file_obj`, either
        # sequentially or through a multiprocessing pool, with a tqdm progress
        # bar; returns the total number of bytes written.
        UpperCAmelCase_ : Optional[Any] = 0
        if self.num_proc is None or self.num_proc == 1:
            # Sequential path: one batch at a time.
            for offset in logging.tqdm(
                range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
                UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(_snake_case )
        else:
            # Parallel path: pool.imap preserves batch order while the workers
            # serialize slices concurrently.
            UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
                    written += file_obj.write(_snake_case )
        return written
71
1
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _lowerCamelCase = 2 class _snake_case : def __init__( self ,*, # begin keyword-only arguments _snake_case="<s>" ,_snake_case="<pad>" ,_snake_case="</s>" ,_snake_case="<unk>" ,_snake_case=None ,): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = bos, unk, pad, eos UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Dict = {} UpperCAmelCase_ : Optional[int] = self.add_symbol(_snake_case ) UpperCAmelCase_ : Tuple = self.add_symbol(_snake_case ) UpperCAmelCase_ : Dict = self.add_symbol(_snake_case ) UpperCAmelCase_ : Optional[Any] = self.add_symbol(_snake_case ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(_snake_case ) UpperCAmelCase_ : Tuple = len(self.symbols ) def __eq__( self ,_snake_case ): return self.indices == other.indices def __getitem__( self ,_snake_case ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): return len(self.symbols ) def __contains__( self ,_snake_case ): return sym in self.indices @classmethod def UpperCamelCase__ ( cls ,_snake_case ): UpperCAmelCase_ : Dict = cls() d.add_from_file(_snake_case ) return d def UpperCamelCase__ ( self ,_snake_case ,_snake_case=1 ,_snake_case=False ): if word in self.indices and not overwrite: UpperCAmelCase_ : Union[str, Any] = self.indices[word] UpperCAmelCase_ : Optional[int] = self.count[idx] + n return idx else: UpperCAmelCase_ : List[Any] = len(self.symbols ) UpperCAmelCase_ : int = idx self.symbols.append(_snake_case ) self.count.append(_snake_case ) return idx def UpperCamelCase__ ( self 
,_snake_case ): return 0 def UpperCamelCase__ ( self ,_snake_case ): if isinstance(_snake_case ,_snake_case ): try: with open(_snake_case ,"r" ,encoding="utf-8" ) as fd: self.add_from_file(_snake_case ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(_snake_case ) ) return UpperCAmelCase_ : Optional[int] = f.readlines() UpperCAmelCase_ : Dict = self._load_meta(_snake_case ) for line in lines[indices_start_line:]: try: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = line.rstrip().rsplit(" " ,1 ) if field == "#fairseq:overwrite": UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = line.rsplit(" " ,1 ) else: UpperCAmelCase_ : Any = False UpperCAmelCase_ : Optional[Any] = int(_snake_case ) UpperCAmelCase_ : Any = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(_snake_case ) ) self.add_symbol(_snake_case ,n=_snake_case ,overwrite=_snake_case ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def a__ ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = dict((re.sub(r"@@$" , "" , _SCREAMING_SNAKE_CASE ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , _SCREAMING_SNAKE_CASE ), v) for k, v in d.items() ) UpperCAmelCase_ : Any = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] UpperCAmelCase_ : int = d[k] # restore return da def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" if not os.path.exists(_SCREAMING_SNAKE_CASE ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models UpperCAmelCase_ : str = os.path.join(_SCREAMING_SNAKE_CASE , "checkpoint.pt" ) if not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) UpperCAmelCase_ : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" ) UpperCAmelCase_ : Optional[Any] = chkpt["cfg"]["model"] # dicts UpperCAmelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , "dict.txt" ) if not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) UpperCAmelCase_ : Optional[int] = Dictionary.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = rewrite_dict_keys(src_dict.indices ) UpperCAmelCase_ : List[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES["vocab_file"] ) 
print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) ) # merges_file (bpecodes) UpperCAmelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , "bpecodes" ) if not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) UpperCAmelCase_ : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # model config UpperCAmelCase_ : str = os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) UpperCAmelCase_ : List[str] = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) ) # tokenizer config UpperCAmelCase_ : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = { "bos_token": 
"<s>", "eos_token": "</s>", "model_max_length": 10_24, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) ) # model UpperCAmelCase_ : Union[str, Any] = chkpt["model"] # remove unneeded keys UpperCAmelCase_ : Dict = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): UpperCAmelCase_ : Dict = model_state_dict.pop(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : str = model_state_dict.pop(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = BioGptConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = BioGptForCausalLM(_SCREAMING_SNAKE_CASE ) # check that it loads ok model_new.load_state_dict(_SCREAMING_SNAKE_CASE ) # save UpperCAmelCase_ : int = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print("Conversion is done!" ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
71
'''simple docstring'''
from ..utils import DummyObject, requires_backends


# NOTE(review): both classes below were obfuscated to the same name
# ``_snake_case`` (the second shadows the first) and ``__SCREAMING_SNAKE_CASE``
# (presumably DummyObject) is not defined in this chunk — original class names
# cannot be recovered from the visible code, so the names are kept as-is.
# Fix applied: the obfuscated ``*_snake_case, **_snake_case`` signatures used
# the same name for the var-positional and var-keyword parameters, which is a
# SyntaxError; they are replaced by the conventional ``*args, **kwargs``.
class _snake_case(metaclass=__SCREAMING_SNAKE_CASE):
    """Dummy placeholder raising an import error unless the ``speech`` backend is installed."""

    # Backends this dummy object requires.
    __A: Any = ["speech"]

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError if the "speech" backend is missing.
        requires_backends(self, ["speech"])


class _snake_case(metaclass=__SCREAMING_SNAKE_CASE):
    """Dummy placeholder raising an import error unless the ``speech`` backend is installed."""

    # Backends this dummy object requires.
    __A: Dict = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
71
1
'''simple docstring'''
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from ``state_dict`` in place.

    Missing keys are ignored (``pop`` with a default), so this is safe to call
    on both the expert shards and the shared-weights checkpoint.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares the embedding's weight tensor."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    """Return a new dict with fairseq MoE key names mapped to NllbMoe names.

    Args:
        state_dict: fairseq checkpoint state dict.
        expert_idx: when loading an expert-rank shard, the expert index to
            substitute for fairseq's local ``moe_layer.experts.0``; ``None``
            for the shared (non-expert) checkpoint.
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # Fixed: the original condition was ``if "fc2" and "experts" not in key``,
        # which only tested the "experts" part ("fc2" is always truthy). The
        # replace() below is a no-op on keys without ".fc2.", so behavior is
        # unchanged — but the condition now says what it means.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert per-expert fairseq checkpoints into HF-style weight shards.

    Writes one ``.bin`` shard per existing expert-rank file plus one shard for
    the shared weights, then renames the shards and builds the weight index.

    Returns:
        ``(metadata, index)`` — or ``({weights_name: keys}, None)`` when only
        the shared checkpoint exists (dummy/test models).
    """
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block (the shared, non-expert weights)
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # NOTE(review): the obfuscated source dropped the assignment target on this
    # line; re-exposing the decoder token embedding under "shared.weight" matches
    # the surrounding conversion logic — confirm the exact key against the
    # NllbMoe checkpoint layout.
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
71
'''simple docstring'''


def a__(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 system of linear equations with Cramer's rule.

    Each equation is ``[a, b, c]`` representing ``a*x + b*y = c``.

    Fix applied: the obfuscated source declared both parameters with the same
    name (a SyntaxError) and compared ``equationa`` against itself in the
    zero-coefficient check; distinct names restore the intended logic.

    Returns:
        ``(x, y)`` as floats.

    Raises:
        ValueError: if either equation does not have exactly 3 coefficients,
            if all four a/b coefficients are zero, or if the system has no
            solution / infinitely many solutions.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")

    if determinant_x == determinant_y == 0:
        # Both right-hand sides are zero: the unique solution is the origin.
        return (0.0, 0.0)

    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial unique solution (consistent system)
    return (x, y)
71
1
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download real regularization images for ``class_prompt`` via LAION kNN.

    Queries the laion_400m index, growing the request size by 1.5x until at
    least ``factor * num_class_images`` candidates (capped at 1e4) are
    returned, then downloads images (with captions and URLs) into
    ``{class_data_dir}/images`` until ``num_class_images`` succeed.

    Fix applied: the obfuscated source defined this as ``a__`` with three
    identically named parameters (a SyntaxError) while the ``__main__`` block
    calls ``retrieve(...)`` — names are restored to match the call site.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Skip the download entirely if enough images are already present.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            # Not enough hits: grow the request size and retry.
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Verify the payload decodes as an image before saving it.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any candidate that fails.
                continue
    return


def parse_args():
    """Parse the command-line arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
71
'''simple docstring'''
from statistics import mean, stdev


# NOTE(review): the obfuscator gave both functions below the same name
# ``a__`` — the second definition shadows the first at import time. No call
# sites are visible to recover the original names, so they are kept as-is.
def a__(data: list, ndigits: int = 3) -> list:
    """Min-max normalize ``data`` to the [0, 1] range, rounded to ``ndigits``.

    Fix applied: the obfuscated signature used the same name for both
    parameters, which is a SyntaxError.
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def a__(data: list, ndigits: int = 3) -> list:  # noqa: F811 — shadows the normalizer above, as in the original
    """Z-score standardize ``data`` (sample standard deviation), rounded to ``ndigits``."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
71
1
'''simple docstring''' from __future__ import annotations from random import choice def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" return choice(_SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[Any] = random_pivot(_SCREAMING_SNAKE_CASE ) # partition based on pivot # linear time UpperCAmelCase_ : Tuple = [e for e in lst if e < pivot] UpperCAmelCase_ : Optional[int] = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(_SCREAMING_SNAKE_CASE ) == k - 1: return pivot # pivot is in elements bigger than k elif len(_SCREAMING_SNAKE_CASE ) < k - 1: return kth_number(_SCREAMING_SNAKE_CASE , k - len(_SCREAMING_SNAKE_CASE ) - 1 ) # pivot is in elements smaller than k else: return kth_number(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
71
'''simple docstring'''
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

# NOTE(review): the obfuscated source assigned both constants to the same
# name ``_lowerCamelCase``; the names below follow the accelerate example
# conventions — confirm against the upstream script.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build tokenized GLUE/MRPC train and validation dataloaders.

    Fix applied: functions in this file were obfuscated to ``a__`` with
    duplicate parameter names while call sites use the real names
    (``get_dataloaders``, ``training_function``, ``main``); names restored.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train BERT-base on MRPC, auto-shrinking the batch size on OOM."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI flags and launch training with the default hyper-parameters."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
71
1
'''simple docstring'''
import unittest

from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _snake_case(unittest.TestCase):
    """Tests for the video-classification pipeline.

    Fix applied: the obfuscated method signatures declared several parameters
    with the same name (``_snake_case``), a SyntaxError; distinct names are
    reconstructed from how each parameter is used in the body.

    NOTE(review): all methods were obfuscated to the single name
    ``UpperCamelCase__`` (later definitions shadow earlier ones). The original
    names cannot be recovered from the visible code, so they are kept as-is —
    restore the real test-method names before relying on this module.
    """

    # Model mapping consumed by the shared pipeline-test machinery.
    __A = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def UpperCamelCase__(self, model, tokenizer, processor):
        # Build a pipeline plus example inputs (a local file and a URL).
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def UpperCamelCase__(self, video_classifier, examples):
        # Every example must yield exactly top_k=2 (score, label) dicts.
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def UpperCamelCase__(self):
        model_id = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=model_id, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        # Batched input: one prediction list per video.
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def UpperCamelCase__(self):
        # TF counterpart not implemented in the original source.
        pass
71
'''simple docstring'''
from __future__ import annotations


def a__(n: int) -> list[int]:
    """Return the prime factorization of ``n`` in ascending order.

    Trial division: while ``i*i <= n``, divide out each factor ``i``; any
    remainder > 1 at the end is itself prime. Returns ``[]`` for ``n <= 1``.

    Fix applied: the obfuscated source left the loop body referencing ``i``,
    ``n`` and ``factors`` while the assignments binding those names had been
    renamed away (a NameError at runtime); the bindings are restored.
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
'''simple docstring'''


def perfect(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors.

    Fixes applied:
    - The obfuscated signature renamed the parameter while the body still
      referenced ``number`` (a NameError); the name is restored — it also
      matches the ``perfect(number)`` call in the ``__main__`` block.
    - ``perfect(0)`` previously returned True because the empty divisor sum
      (0) compared equal to 0; non-positive numbers now return False.
    """
    if number <= 0:
        return False
    # Proper divisors of n are all <= n // 2.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
71
'''simple docstring'''
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo

# Fix applied: the obfuscated source assigned all three module constants to
# the same name ``_lowerCamelCase``, while the decorator and MetricInfo call
# below reference ``_CITATION``/``_DESCRIPTION``/``_KWARGS_DESCRIPTION`` —
# the constants are restored to the names their consumers use.
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
            Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
            and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results[\"google_bleu\"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results[\"google_bleu\"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Google BLEU (GLEU) metric, backed by ``nltk.translate.gleu_score``.

    NOTE(review): both methods were obfuscated to the same name
    ``UpperCamelCase__`` (the second shadows the first); the ``datasets.Metric``
    API presumably expects ``_info`` and ``_compute`` — restore before use.
    """

    def UpperCamelCase__(self):
        # Declare the metric's schema: tokenized predictions and (possibly
        # multiple) tokenized references per prediction.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def UpperCamelCase__(self, predictions, references, min_len=1, max_len=4):
        # Delegate directly to NLTK's corpus-level GLEU implementation.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
71
1
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


# NOTE(review): this block appears machine-obfuscated — every parameter is
# literally named `_snake_case` (duplicate parameter names are a SyntaxError in
# Python) and the `UpperCAmelCase_` assignment targets do not match the names
# read later (`self.transformers`, `self.mix_ratio`, `input_states`, ...).
# Comments below describe the apparent intent (a dual-Transformer2D wrapper
# that blends the outputs of two transformers); recover the real identifiers
# from the upstream source before relying on this documentation.
class _snake_case (nn.Module):
    # Holds two identically-configured transformer sub-models; the forward pass
    # feeds each one a slice of the conditioning tokens and mixes the results.
    def __init__( self ,_snake_case = 16 ,_snake_case = 88 ,_snake_case = None ,_snake_case = 1 ,_snake_case = 0.0 ,_snake_case = 32 ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = "geglu" ,_snake_case = None ,):
        super().__init__()
        # Two transformer sub-models sharing the same hyper-parameters.
        UpperCAmelCase_ : Optional[Any] = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=_snake_case ,attention_head_dim=_snake_case ,in_channels=_snake_case ,num_layers=_snake_case ,dropout=_snake_case ,norm_num_groups=_snake_case ,cross_attention_dim=_snake_case ,attention_bias=_snake_case ,sample_size=_snake_case ,num_vector_embeds=_snake_case ,activation_fn=_snake_case ,num_embeds_ada_norm=_snake_case ,)
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        UpperCAmelCase_ : List[str] = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        UpperCAmelCase_ : int = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        UpperCAmelCase_ : List[Any] = [1, 0]

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = True ,):
        """Forward pass: split `encoder_hidden_states` per condition, run each
        slice through its assigned transformer, then blend the two residuals
        with `mix_ratio` and add the input back (residual connection).
        """
        UpperCAmelCase_ : List[str] = hidden_states
        UpperCAmelCase_ : str = []
        UpperCAmelCase_ : Optional[int] = 0

        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            UpperCAmelCase_ : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            UpperCAmelCase_ : Any = self.transformer_index_for_condition[i]
            UpperCAmelCase_ : int = self.transformers[transformer_index](
                _snake_case ,encoder_hidden_states=_snake_case ,timestep=_snake_case ,cross_attention_kwargs=_snake_case ,return_dict=_snake_case ,)[0]
            # Store the residual (model output minus input) for later blending.
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        # Weighted blend of the two residuals, then restore the input.
        UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        UpperCAmelCase_ : List[Any] = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=_snake_case )
71
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCamelCase = logging.getLogger(__name__) @dataclass class _snake_case : __A : str =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."}) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."}) @dataclass class _snake_case : __A : str =field( metadata={"help": "The input data dir. 
Should contain the .tsv files (or other data files) for the task."}) __A : Optional[str] =field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __A : Optional[int] =field( default=10_24 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_28 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."}) __A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."}) __A : bool =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) ) def a__ ( ) -> Any: """simple docstring""" UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses() check_output_dir(_SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCAmelCase_ : Dict = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_SCREAMING_SNAKE_CASE ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCAmelCase_ : Dict = SeqaSeqDataset # Get datasets 
UpperCAmelCase_ : Tuple = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) UpperCAmelCase_ : Dict = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCAmelCase_ : int = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCAmelCase_ : Optional[Any] = ( build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None ) UpperCAmelCase_ : List[str] = SeqaSeqTrainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) UpperCAmelCase_ : List[Any] = {} # Training if training_args.do_train: logger.info("*** Train ***" ) UpperCAmelCase_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCAmelCase_ : int = train_result.metrics UpperCAmelCase_ : Dict = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" ) UpperCAmelCase_ : Optional[Any] = data_args.n_val UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) if training_args.do_predict: logger.info("*** Predict ***" ) UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" ) UpperCAmelCase_ : List[str] = test_output.metrics UpperCAmelCase_ : int = data_args.n_test if trainer.is_world_process_zero(): UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 ) handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate: UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE ) write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) ) return 
all_metrics def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
71
1
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" with open(_SCREAMING_SNAKE_CASE ) as metadata_file: UpperCAmelCase_ : List[Any] = json.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["model_config"] ) # Load in the weights from the checkpoint_path UpperCAmelCase_ : Any = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" ) # Load the entity vocab file UpperCAmelCase_ : Optional[Any] = load_entity_vocab(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks UpperCAmelCase_ : List[Any] = AddedToken("<ent>" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = AddedToken("<ent2>" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens UpperCAmelCase_ : str = state_dict["embeddings.word_embeddings.weight"] UpperCAmelCase_ : Dict = 
word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) UpperCAmelCase_ : Any = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) UpperCAmelCase_ : int = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: UpperCAmelCase_ : List[str] = F'''encoder.layer.{layer_index}.attention.self.''' UpperCAmelCase_ : Tuple = state_dict[prefix + matrix_name] UpperCAmelCase_ : Union[str, Any] = state_dict[prefix + matrix_name] UpperCAmelCase_ : Optional[int] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks UpperCAmelCase_ : List[str] = state_dict["entity_embeddings.entity_embeddings.weight"] UpperCAmelCase_ : int = entity_emb[entity_vocab["[MASK]"]] UpperCAmelCase_ : Dict = LukeModel(config=_SCREAMING_SNAKE_CASE ).eval() UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if not (len(_SCREAMING_SNAKE_CASE ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'''Missing keys {", ".join(_SCREAMING_SNAKE_CASE )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' ) # Check outputs UpperCAmelCase_ : Optional[Any] = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="entity_classification" ) UpperCAmelCase_ : Tuple = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." 
) UpperCAmelCase_ : Optional[Any] = (39, 42) UpperCAmelCase_ : List[str] = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": UpperCAmelCase_ : str = torch.Size((1, 42, 10_24) ) UpperCAmelCase_ : List[Any] = torch.tensor( [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] ) else: # base UpperCAmelCase_ : int = torch.Size((1, 42, 7_68) ) UpperCAmelCase_ : Dict = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": UpperCAmelCase_ : int = torch.Size((1, 1, 10_24) ) UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] ) else: # base UpperCAmelCase_ : int = torch.Size((1, 1, 7_68) ) UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = {} with open(_SCREAMING_SNAKE_CASE , "r" , 
encoding="utf-8" ) as f: for index, line in enumerate(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = line.rstrip().split("\t" ) UpperCAmelCase_ : List[str] = index return entity_vocab if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""") parser.add_argument( """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration.""" ) parser.add_argument( """--entity_vocab_path""", default=None, type=str, help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model.""" ) parser.add_argument( """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted.""" ) _lowerCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
71
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


# NOTE(review): this file is machine-obfuscated — duplicate `_snake_case`
# parameter names (a SyntaxError), class names collapsed to `_snake_case`
# (`TFBlenderbotModelTester` is referenced below but never defined under that
# name), and `UpperCAmelCase_` assignment targets that do not match the names
# read later. Comments describe apparent intent; confirm against upstream.
@require_tf
class _snake_case :
    # Model-tester helper: builds a small Blenderbot config plus random inputs.
    __A : Dict =BlenderbotConfig
    __A : Union[str, Any] ={}
    __A : Any ="gelu"

    def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
        UpperCAmelCase_ : List[Any] = parent
        UpperCAmelCase_ : str = batch_size
        UpperCAmelCase_ : Dict = seq_length
        UpperCAmelCase_ : int = is_training
        UpperCAmelCase_ : Optional[Any] = use_labels
        UpperCAmelCase_ : Any = vocab_size
        UpperCAmelCase_ : Optional[int] = hidden_size
        UpperCAmelCase_ : Optional[int] = num_hidden_layers
        UpperCAmelCase_ : int = num_attention_heads
        UpperCAmelCase_ : Tuple = intermediate_size
        UpperCAmelCase_ : Any = hidden_dropout_prob
        UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
        UpperCAmelCase_ : List[Any] = max_position_embeddings
        UpperCAmelCase_ : str = eos_token_id
        UpperCAmelCase_ : List[Any] = pad_token_id
        UpperCAmelCase_ : List[Any] = bos_token_id

    def UpperCamelCase__ ( self ):
        """Build a config and a matching random inputs dict (eos appended to input_ids)."""
        UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )

        UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )

        UpperCAmelCase_ : Optional[Any] = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
        return config, inputs_dict

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
        """Check that decoding with cached past_key_values matches a full forward pass."""
        UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
        UpperCAmelCase_ : int = inputs_dict["input_ids"]

        UpperCAmelCase_ : Dict = input_ids[:1, :]
        UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
        UpperCAmelCase_ : int = inputs_dict["head_mask"]
        UpperCAmelCase_ : Optional[int] = 1

        # first forward pass
        UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )

        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )

        # append to next input_ids and
        UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
        UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )

        UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
        UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]

        self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )

        # select random slice
        UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
        UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
        UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )


def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
    """Fill in default attention/head masks for Blenderbot inputs (pad tokens masked out)."""
    if attention_mask is None:
        UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        UpperCAmelCase_ : Optional[int] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
    # Common-test harness for the TF Blenderbot model classes.
    __A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __A : Dict =(
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __A : Any =True
    __A : Dict =False
    __A : Dict =False

    def UpperCamelCase__ ( self ):
        # NOTE(review): `TFBlenderbotModelTester` is the mangled class above;
        # this reference is unresolved in the obfuscated file.
        UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
        UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )

    def UpperCamelCase__ ( self ):
        self.config_tester.run_common_tests()

    def UpperCamelCase__ ( self ):
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )


@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
    # Slow integration test: generate a reply with the released 400M-distill model.
    __A : Optional[int] =["My friends are cool but they eat too many carbs."]
    __A : Optional[Any] ="facebook/blenderbot-400M-distill"

    @cached_property
    def UpperCamelCase__ ( self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def UpperCamelCase__ ( self ):
        UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def UpperCamelCase__ ( self ):
        UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
        UpperCAmelCase_ : Union[str, Any] = self.model.generate(
            model_inputs.input_ids ,)
        UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
1
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class _snake_case (__SCREAMING_SNAKE_CASE): __A : Any ="owlvit_text_model" def __init__( self ,_snake_case=4_94_08 ,_snake_case=5_12 ,_snake_case=20_48 ,_snake_case=12 ,_snake_case=8 ,_snake_case=16 ,_snake_case="quick_gelu" ,_snake_case=1E-5 ,_snake_case=0.0 ,_snake_case=0.02 ,_snake_case=1.0 ,_snake_case=0 ,_snake_case=4_94_06 ,_snake_case=4_94_07 ,**_snake_case ,): super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case ) UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : List[Any] = hidden_size UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : Optional[int] = attention_dropout UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : Optional[int] = initializer_factor @classmethod def UpperCamelCase__ ( cls ,_snake_case ,**_snake_case ): cls._set_token_in_kwargs(_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = cls.get_config_dict(_snake_case ,**_snake_case ) # get the 
text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": UpperCAmelCase_ : Tuple = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_snake_case ,**_snake_case ) class _snake_case (__SCREAMING_SNAKE_CASE): __A : Tuple ="owlvit_vision_model" def __init__( self ,_snake_case=7_68 ,_snake_case=30_72 ,_snake_case=12 ,_snake_case=12 ,_snake_case=3 ,_snake_case=7_68 ,_snake_case=32 ,_snake_case="quick_gelu" ,_snake_case=1E-5 ,_snake_case=0.0 ,_snake_case=0.02 ,_snake_case=1.0 ,**_snake_case ,): super().__init__(**_snake_case ) UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : str = num_attention_heads UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Dict = image_size UpperCAmelCase_ : Dict = patch_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : List[str] = layer_norm_eps UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Optional[Any] = initializer_range UpperCAmelCase_ : Optional[int] = initializer_factor @classmethod def UpperCamelCase__ ( cls ,_snake_case ,**_snake_case ): cls._set_token_in_kwargs(_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = cls.get_config_dict(_snake_case ,**_snake_case ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": UpperCAmelCase_ : Optional[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of 
type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_snake_case ,**_snake_case ) class _snake_case (__SCREAMING_SNAKE_CASE): __A : Tuple ="owlvit" __A : Optional[int] =True def __init__( self ,_snake_case=None ,_snake_case=None ,_snake_case=5_12 ,_snake_case=2.6592 ,_snake_case=True ,**_snake_case ,): super().__init__(**_snake_case ) if text_config is None: UpperCAmelCase_ : Tuple = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: UpperCAmelCase_ : Tuple = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) UpperCAmelCase_ : Tuple = OwlViTTextConfig(**_snake_case ) UpperCAmelCase_ : Optional[int] = OwlViTVisionConfig(**_snake_case ) UpperCAmelCase_ : List[Any] = projection_dim UpperCAmelCase_ : Union[str, Any] = logit_scale_init_value UpperCAmelCase_ : int = return_dict UpperCAmelCase_ : Optional[Any] = 1.0 @classmethod def UpperCamelCase__ ( cls ,_snake_case ,**_snake_case ): cls._set_token_in_kwargs(_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = cls.get_config_dict(_snake_case ,**_snake_case ) if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_snake_case ,**_snake_case ) @classmethod def UpperCamelCase__ ( cls ,_snake_case ,_snake_case ,**_snake_case ): UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : List[Any] = text_config UpperCAmelCase_ : int = vision_config return cls.from_dict(_snake_case ,**_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ : Any = self.text_config.to_dict() UpperCAmelCase_ : Tuple = self.vision_config.to_dict() UpperCAmelCase_ : Union[str, Any] = self.__class__.model_type return output class _snake_case (__SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def UpperCamelCase__ ( self ): return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): return 1E-4 def UpperCamelCase__ ( self ,_snake_case ,_snake_case = -1 ,_snake_case = -1 ,_snake_case = None ,): UpperCAmelCase_ : str = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_snake_case ,seq_length=_snake_case ,framework=_snake_case ) UpperCAmelCase_ : List[Any] = super().generate_dummy_inputs( processor.image_processor ,batch_size=_snake_case ,framework=_snake_case ) return {**text_input_dict, **image_input_dict} @property def UpperCamelCase__ ( self ): return 14
71
'''simple docstring'''
from numpy import exp, pi, sqrt


def a__(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density evaluated at ``x``.

    Args:
        x: Point at which to evaluate the density.
        mu: Mean of the distribution. Defaults to 0.0.
        sigma: Standard deviation of the distribution. Defaults to 1.0.

    Returns:
        ``1 / sqrt(2*pi*sigma**2) * exp(-(x - mu)**2 / (2*sigma**2))``.

    >>> round(float(a__(0.0)), 6)
    0.398942
    """
    # Fixes the original signature, which reused one obfuscated name for all
    # three parameters (a SyntaxError -- the body referenced x/mu/sigma), and
    # annotated the return as ``int`` although the density is a float.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
'''simple docstring'''
# Unit test for accelerate's SageMaker launch-argument parsing
# (`_convert_nargs_to_dict`).
# NOTE(review): this file appears machine-mangled.  The dataclass base
# `__SCREAMING_SNAKE_CASE` is undefined here, the annotation names (`List`,
# `Dict`, ...) are never imported, every field is named `__A` (under Python's
# class semantics only the last binding survives), both classes share the name
# `_snake_case`, and the test body references `MockLaunchConfig` and
# `converted_args`, neither of which is defined in this file.  The original
# identifiers must be restored before this test can run.
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class _snake_case (__SCREAMING_SNAKE_CASE):
    # Mock launch configuration values for an Amazon SageMaker run.
    __A : List[Any] =ComputeEnvironment.AMAZON_SAGEMAKER
    __A : List[Any] =True
    __A : int ="ml.p3.2xlarge"
    __A : Tuple ="accelerate_sagemaker_execution_role"
    __A : Optional[int] ="hf-sm"
    __A : Optional[Any] ="us-east-1"
    __A : List[str] =1
    __A : Dict ="accelerate-sagemaker-1"
    __A : List[Any] ="1.6"
    __A : str ="4.4"
    __A : List[Any] ="train.py"
    # Script args where every flag is followed by a value (parses cleanly).
    __A : Optional[Any] =[
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Script args mixing bare flags with valued flags (expected to fail parsing).
    __A : Optional[int] =[
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class _snake_case (unittest.TestCase):
    def UpperCamelCase__ ( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        UpperCAmelCase_ : List[str] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        # Each parsed value should come back with its proper Python type.
        assert isinstance(converted_args["model_name_or_path"] ,_snake_case )
        assert isinstance(converted_args["do_train"] ,_snake_case )
        assert isinstance(converted_args["epochs"] ,_snake_case )
        assert isinstance(converted_args["learning_rate"] ,_snake_case )
        assert isinstance(converted_args["max_steps"] ,_snake_case )
        # Mixed bare/valued flags should raise during parsing.
        with pytest.raises(_snake_case ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
71
'''simple docstring'''
# Dual-transformer wrapper: runs two TransformeraDModel instances over two
# differently-sized segments of the conditioning sequence and mixes the
# results with a configurable ratio.
# NOTE(review): this block is machine-mangled -- every parameter is named
# `_snake_case` (duplicate parameter names are a SyntaxError) and every
# assignment target is `UpperCAmelCase_`; the keyword names used in the calls
# below (num_attention_heads, attention_head_dim, ...) and the attribute reads
# (self.condition_lengths, self.mix_ratio, ...) show the intended identifiers,
# which must be restored before use.
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class _snake_case (nn.Module):
    def __init__( self ,_snake_case = 16 ,_snake_case = 88 ,_snake_case = None ,_snake_case = 1 ,_snake_case = 0.0 ,_snake_case = 32 ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = "geglu" ,_snake_case = None ,):
        super().__init__()
        # Two parallel 2D transformers sharing the same hyper-parameters.
        UpperCAmelCase_ : Optional[Any] = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=_snake_case ,attention_head_dim=_snake_case ,in_channels=_snake_case ,num_layers=_snake_case ,dropout=_snake_case ,norm_num_groups=_snake_case ,cross_attention_dim=_snake_case ,attention_bias=_snake_case ,sample_size=_snake_case ,num_vector_embeds=_snake_case ,activation_fn=_snake_case ,num_embeds_ada_norm=_snake_case ,)
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        UpperCAmelCase_ : List[str] = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        UpperCAmelCase_ : int = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        UpperCAmelCase_ : List[Any] = [1, 0]

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = True ,):
        # Forward pass.  Intended parameters (per the body's reads): hidden_states,
        # encoder_hidden_states, timestep, attention_mask, cross_attention_kwargs,
        # return_dict.
        UpperCAmelCase_ : List[str] = hidden_states
        UpperCAmelCase_ : str = []
        UpperCAmelCase_ : Optional[int] = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            UpperCAmelCase_ : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            UpperCAmelCase_ : Any = self.transformer_index_for_condition[i]
            UpperCAmelCase_ : int = self.transformers[transformer_index](
                _snake_case ,encoder_hidden_states=_snake_case ,timestep=_snake_case ,cross_attention_kwargs=_snake_case ,return_dict=_snake_case ,)[0]
            # Keep only the residual (encoded minus input) so the input is
            # re-added exactly once after mixing below.
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        # Blend the two residuals with mix_ratio, then restore the input
        # (residual connection).
        UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        UpperCAmelCase_ : List[Any] = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=_snake_case )
71
1
'''simple docstring''' from __future__ import annotations from collections import deque class _snake_case : def __init__( self ,_snake_case ): UpperCAmelCase_ : list[dict] = [] self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) for keyword in keywords: self.add_keyword(_snake_case ) self.set_fail_transitions() def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Optional[int] = 0 for character in keyword: UpperCAmelCase_ : List[str] = self.find_next_state(_snake_case ,_snake_case ) if next_state is None: self.adlist.append( { "value": character, "next_states": [], "fail_state": 0, "output": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) UpperCAmelCase_ : str = len(self.adlist ) - 1 else: UpperCAmelCase_ : str = next_state self.adlist[current_state]["output"].append(_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : deque = deque() for node in self.adlist[0]["next_states"]: q.append(_snake_case ) UpperCAmelCase_ : Optional[int] = 0 while q: UpperCAmelCase_ : int = q.popleft() for child in self.adlist[r]["next_states"]: q.append(_snake_case ) UpperCAmelCase_ : int = self.adlist[r]["fail_state"] while ( self.find_next_state(_snake_case ,self.adlist[child]["value"] ) is None and state != 0 ): UpperCAmelCase_ : Tuple = self.adlist[state]["fail_state"] UpperCAmelCase_ : Union[str, Any] = self.find_next_state( _snake_case ,self.adlist[child]["value"] ) if self.adlist[child]["fail_state"] is None: UpperCAmelCase_ : Tuple = 0 UpperCAmelCase_ : Union[str, Any] = ( self.adlist[child]["output"] + self.adlist[self.adlist[child]["fail_state"]]["output"] ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : dict = {} # returns a dict with keywords and list of its occurrences UpperCAmelCase_ : Optional[Any] 
= 0 for i in range(len(_snake_case ) ): while ( self.find_next_state(_snake_case ,string[i] ) is None and current_state != 0 ): UpperCAmelCase_ : List[str] = self.adlist[current_state]["fail_state"] UpperCAmelCase_ : Union[str, Any] = self.find_next_state(_snake_case ,string[i] ) if next_state is None: UpperCAmelCase_ : Optional[Any] = 0 else: UpperCAmelCase_ : Union[str, Any] = next_state for key in self.adlist[current_state]["output"]: if key not in result: UpperCAmelCase_ : int = [] result[key].append(i - len(_snake_case ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
71
'''simple docstring'''
import json
import sys


def a__(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown report.

    Args:
        input_json_file: Path to a JSON file mapping benchmark names to
            ``{metric: {"new": x, "old": y, "diff": z}}`` dicts; ``"old"``
            and ``"diff"`` are optional per metric.
        output_md_file: Path the generated Markdown is written to.
    """
    # Fixes the original def line, which reused one obfuscated name for both
    # parameters (a SyntaxError).
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        # Only the file name, not the full path, goes into the section header.
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        # One wide table row per benchmark: header, separator, values.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Non-numeric values render as the literal string "None".
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


# Backward-compatible alias: the __main__ guard (and possibly external callers)
# used this name, which the mangled original never defined.
format_json_to_md = a__


if __name__ == "__main__":
    # CLI usage: <script> <input_json_file> <output_md_file>.  The original
    # guard assigned both argv values to the same variable and then called the
    # undefined name with two other undefined names.
    format_json_to_md(sys.argv[1], sys.argv[2])
71
1
'''simple docstring'''
# MBart-50 tokenizer built on a SentencePiece BPE model, with fairseq-style id
# offsets and per-language prefix/suffix special tokens.
# NOTE(review): this block is machine-mangled.  Every module-level constant is
# bound to the single name `_lowerCamelCase` (only the last binding survives),
# every method shares the name `UpperCamelCase__` (later defs shadow earlier
# ones), every parameter is `_snake_case` (duplicate parameter names are a
# SyntaxError), and the base class `__SCREAMING_SNAKE_CASE` plus names such as
# VOCAB_FILES_NAMES / FAIRSEQ_LANGUAGE_CODES / `@src_lang.setter` are undefined
# here.  The original identifiers must be restored before this can run.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


_lowerCamelCase = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
_lowerCamelCase = """▁"""

_lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}

_lowerCamelCase = {
    """vocab_file""": {
        """facebook/mbart-large-50-one-to-many-mmt""": (
            """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}

_lowerCamelCase = {
    """facebook/mbart-large-50-one-to-many-mmt""": 1024,
}

# fmt: off
_lowerCamelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]


class _snake_case (__SCREAMING_SNAKE_CASE):
    # Class-level tokenizer metadata (names undefined in this mangled file).
    __A : List[Any] =VOCAB_FILES_NAMES
    __A : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A : Dict =PRETRAINED_VOCAB_FILES_MAP
    __A : Dict =["input_ids", "attention_mask"]
    __A : List[int] =[]
    __A : List[int] =[]

    def __init__( self ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case="</s>" ,_snake_case="</s>" ,_snake_case="<s>" ,_snake_case="<unk>" ,_snake_case="<pad>" ,_snake_case="<mask>" ,_snake_case = None ,**_snake_case ,):
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : Optional[Any] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token
        UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        # Every fairseq language code becomes an additional special token.
        UpperCAmelCase_ : Optional[Any] = kwargs.get("additional_special_tokens" ,[] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=_snake_case ,tgt_lang=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,mask_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
        UpperCAmelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_snake_case ) )
        UpperCAmelCase_ : List[str] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        UpperCAmelCase_ : Optional[int] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        UpperCAmelCase_ : List[Any] = 1
        UpperCAmelCase_ : Union[str, Any] = len(self.sp_model )
        # Language codes are appended after the spm vocabulary (plus offset).
        UpperCAmelCase_ : int = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case )
        }
        UpperCAmelCase_ : List[Any] = {v: k for k, v in self.lang_code_to_id.items()}
        UpperCAmelCase_ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        UpperCAmelCase_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        # Default source language is English.
        UpperCAmelCase_ : Optional[int] = src_lang if src_lang is not None else "en_XX"
        UpperCAmelCase_ : List[Any] = self.lang_code_to_id[self._src_lang]
        UpperCAmelCase_ : str = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def UpperCamelCase__ ( self ):
        # vocab_size: spm pieces + language codes + fairseq offset + mask token.
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def UpperCamelCase__ ( self ):
        # src_lang getter.
        return self._src_lang

    @src_lang.setter
    def UpperCamelCase__ ( self ,_snake_case ):
        # src_lang setter: also refreshes the special-token layout.
        UpperCAmelCase_ : Tuple = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it for pickling.
        UpperCAmelCase_ : List[Any] = self.__dict__.copy()
        UpperCAmelCase_ : List[str] = None
        return state

    def __setstate__( self ,_snake_case ):
        UpperCAmelCase_ : Union[str, Any] = d

        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            UpperCAmelCase_ : int = {}

        # Rebuild the SentencePiece processor dropped by __getstate__.
        UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCamelCase__ ( self ):
        # get_vocab: token -> id mapping including added tokens.
        UpperCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCamelCase__ ( self ,_snake_case ):
        # _tokenize: delegate to SentencePiece.
        return self.sp_model.encode(_snake_case ,out_type=_snake_case )

    def UpperCamelCase__ ( self ,_snake_case ):
        # _convert_token_to_id with the fairseq offset applied.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        UpperCAmelCase_ : Dict = self.sp_model.PieceToId(_snake_case )

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def UpperCamelCase__ ( self ,_snake_case ):
        # _convert_id_to_token, undoing the fairseq offset.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def UpperCamelCase__ ( self ,_snake_case ):
        # convert_tokens_to_string: decode sub-token runs, keeping special
        # tokens verbatim.
        UpperCAmelCase_ : Optional[int] = []
        UpperCAmelCase_ : int = ""
        UpperCAmelCase_ : Optional[int] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_snake_case ) + token
                UpperCAmelCase_ : int = True
                UpperCAmelCase_ : List[Any] = []
            else:
                current_sub_tokens.append(_snake_case )
                UpperCAmelCase_ : Any = False
        out_string += self.sp_model.decode(_snake_case )
        return out_string.strip()

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
        # save_vocabulary: copy (or re-serialize) the spm model file.
        if not os.path.isdir(_snake_case ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCAmelCase_ : Optional[Any] = os.path.join(
            _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,_snake_case )
        elif not os.path.isfile(self.vocab_file ):
            with open(_snake_case ,"wb" ) as fi:
                UpperCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(_snake_case )
        return (out_vocab_file,)

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ):
        # get_special_tokens_mask: 1 for prefix/suffix special tokens, 0 for
        # sequence tokens.  NOTE(review): the duplicated `token_ids_a=` keyword
        # below is a SyntaxError inherited from the mangling.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
        UpperCAmelCase_ : Union[str, Any] = [1] * len(self.prefix_tokens )
        UpperCAmelCase_ : str = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_snake_case )) + suffix_ones
        return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
        # build_inputs_with_special_tokens: [lang_code] tokens [eos].
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ):
        # _build_translation_inputs: encode raw inputs and attach the target
        # language id for generation.
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        UpperCAmelCase_ : str = src_lang
        UpperCAmelCase_ : Dict = self(_snake_case ,add_special_tokens=_snake_case ,return_tensors=_snake_case ,**_snake_case )
        UpperCAmelCase_ : List[str] = self.convert_tokens_to_ids(_snake_case )
        UpperCAmelCase_ : Optional[Any] = tgt_lang_id
        return inputs

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case = "en_XX" ,_snake_case = None ,_snake_case = "ro_RO" ,**_snake_case ,):
        # prepare_seq2seq_batch with en_XX -> ro_RO defaults.
        UpperCAmelCase_ : Optional[Any] = src_lang
        UpperCAmelCase_ : int = tgt_lang
        return super().prepare_seqaseq_batch(_snake_case ,_snake_case ,**_snake_case )

    def UpperCamelCase__ ( self ):
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang )

    def UpperCamelCase__ ( self ):
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def UpperCamelCase__ ( self ,_snake_case ):
        # set_src_lang_special_tokens: prefix=[src_lang_code], suffix=[eos].
        UpperCAmelCase_ : List[Any] = self.lang_code_to_id[src_lang]
        UpperCAmelCase_ : List[str] = [self.cur_lang_code_id]
        UpperCAmelCase_ : Dict = [self.eos_token_id]

    def UpperCamelCase__ ( self ,_snake_case ):
        # set_tgt_lang_special_tokens: prefix=[tgt_lang_code], suffix=[eos].
        UpperCAmelCase_ : int = self.lang_code_to_id[tgt_lang]
        UpperCAmelCase_ : Optional[Any] = [self.cur_lang_code_id]
        UpperCAmelCase_ : int = [self.eos_token_id]
71
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[int] =DebertaVaTokenizer __A : Union[str, Any] =DebertaVaTokenizerFast __A : str =True __A : List[str] =True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = "this is a test" UpperCAmelCase_ : Optional[Any] = "this is a test" return input_text, output_text def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = "<pad>" UpperCAmelCase_ : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"[PAD]" ) self.assertEqual(len(_snake_case ) ,3_00_01 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = "This is a test" UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89] UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case 
,_snake_case ) # fmt: off UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,) @slow def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 
4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
71
1
"""Official evaluation script for SQuAD version 2.0.

Computes exact-match and F1 scores for predictions against a SQuAD 2.0
dataset, optionally applying a no-answer probability threshold and
producing precision-recall curves / histograms.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# Regex used to strip English articles during answer normalization.
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

# Parsed command-line options; populated under the __main__ guard.
OPTS = None


def parse_args():
    """Parse command-line options for the evaluation script."""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    """Map each question id to True if it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                # Empty "text" list marks an unanswerable question.
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    """Return whitespace tokens of the normalized answer (empty list for '')."""
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    """Return the token-level F1 score between a gold and a predicted answer."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def get_raw_scores(dataset, preds):
    """Compute raw (un-thresholded) exact and F1 scores per question id."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero-out (or credit) scores for questions predicted as unanswerable."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicted no-answer: score is 1 only if the question truly has no answer.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into exact/f1/total percentages."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from new_eval into main_eval under a name prefix."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a precision-recall step plot (requires matplotlib, imported in __main__)."""
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision by sweeping the no-answer probability threshold."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves for exact, F1 and an oracle classifier; merge APs into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: score 1 exactly when the question has an answer.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of predicted no-answer probabilities for the given qids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the given score; return (best %, thresh)."""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question costs a point; abstaining is neutral.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh


def main():
    """Load data/predictions, score them, and emit the evaluation dictionary."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is only needed (and imported) when plots were requested.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
71
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError("Input value must be an 'int' type" ) UpperCAmelCase_ : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
71
1
"""Accelerate example: computing metrics correctly on a distributed validation set."""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the GLUE MRPC dataset.

    Args:
        accelerator (`Accelerator`): used to decide padding strategy (TPU vs. other).
        batch_size (`int`, *optional*): batch size for the train dataloader.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train BERT on MRPC and evaluate it with correctly truncated distributed metrics."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI options and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
71
'''simple docstring''' from math import factorial def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", f"""fifty-two card deck is: {combinations(52, 5)}\n""", ) print( """If a class of 40 students must be arranged into groups of""", f"""4 for group projects, there are {combinations(40, 4)} ways""", """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", f"""are {combinations(10, 3)} ways that first, second and""", """third place can be awarded.""", )
71
1
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore _lowerCamelCase = namedtuple("""covid_data""", """cases deaths recovered""") def a__ ( _SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = "//div[@class = \"maincounter-number\"]/span/text()" return covid_data(*html.fromstring(requests.get(_SCREAMING_SNAKE_CASE ).content ).xpath(_SCREAMING_SNAKE_CASE ) ) _lowerCamelCase = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
71
"""Fast and slow tests for the diffusers VideoToVideoSDPipeline."""
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build tiny randomly-initialized pipeline components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic pipeline inputs for the given device/seed."""
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
71
1
"""Pix2Struct model configuration (text decoder, vision encoder, and composite configs)."""

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Restored: the obfuscation bound both module globals to the same name, so the
# later archive-map assignment clobbered the logger and `logger.warning` below
# raised NameError.
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    # PretrainedConfig machinery looks these attribute names up; the obfuscated
    # `__A` triple-assignment left only the last one alive under the wrong name.
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration tying the text and vision sub-configs together.

    The class names above were restored from the references in this __init__
    (the obfuscation named all three classes identically, so the later
    definitions shadowed the earlier ones and these constructor calls raised
    NameError).
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Convenience mirrors of the decoder's special token ids.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep the sub-config init ranges in sync with the composite config.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Alternate constructor from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
71
"""Integration tests for the `datasets` inspection helpers (inspect_* / get_dataset_*)."""

import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Restored name: pytest only honors the module-level marker when it is called
# `pytestmark`; the obfuscated `_lowerCamelCase` silently dropped the
# integration mark.
pytestmark = pytest.mark.integration


# Function and parameter names below are restored from the parametrize argname
# strings: pytest matches argnames to the signature, and the obfuscated
# duplicated `_SCREAMING_SNAKE_CASE` parameters were outright SyntaxErrors.
# Distinct `test_*` names also stop the functions (all previously `a__`) from
# shadowing each other.
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
71
1
"""Deprecated alias: ``GLPNFeatureExtractor`` forwards to ``GLPNImageProcessor``."""

import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


# Class/base names restored from the warning text and the module import: the
# obfuscated `_snake_case(__SCREAMING_SNAKE_CASE)` referenced an undefined base.
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """Emit a deprecation warning, then defer entirely to GLPNImageProcessor.

        Bug fix: the previous code passed the positional ``*args`` tuple as the
        ``category`` argument of ``warnings.warn``, which raises TypeError at
        runtime; the category must be the ``FutureWarning`` class.
        """
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
71
"""Tests for CLIPProcessor: save/load round-trips and tokenizer/image-processor delegation."""

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


# Method names restored to distinct `test_*`/helper names: the obfuscated class
# defined every method as `UpperCamelCase__`, so all but the last were shadowed
# and unittest discovered nothing.
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        """Write a tiny BPE vocab/merges pair and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random PIL image (channels-last) as pipeline input."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        # NOTE(review): the loaded tokenizer defaults to the fast class —
        # isinstance target reconstructed accordingly; confirm against upstream.
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
71
1
"""Ford-Fulkerson maximum flow using BFS to find augmenting paths (Edmonds-Karp)."""

from collections import deque


def bfs(graph, source, sink, parent):
    """Return True if an augmenting path from ``source`` to ``sink`` exists.

    Side effect: fills ``parent`` so the path can be walked back from ``sink``.
    ``graph`` is a residual-capacity adjacency matrix (graph[u][v] > 0 means
    residual capacity remains on edge u->v).
    """
    visited = [False] * len(graph)
    # deque gives O(1) popleft; the previous list.pop(0) was O(n) per dequeue.
    queue = deque([source])
    visited[source] = True

    while queue:
        u = queue.popleft()
        for v in range(len(graph[u])):
            if not visited[v] and graph[u][v] > 0:
                queue.append(v)
                visited[v] = True
                parent[v] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from ``source`` to ``sink``.

    Mutates ``graph`` in place into its residual network. Both function names
    were restored: the obfuscation named both functions ``a__``, so the call
    below raised NameError.
    """
    parent = [-1] * len(graph)
    max_flow = 0

    while bfs(graph, source, sink, parent):
        # Find the bottleneck (minimum residual capacity) along the found path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities along the path (and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


if __name__ == "__main__":
    # Demo moved under a __main__ guard so importing the module has no side effects.
    graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    source, sink = 0, 5
    print(ford_fulkerson(graph, source, sink))
71
"""Tests for the AudioLDM text-to-audio pipeline: fast dummy-model tests and slow checkpoint tests."""

import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechTaHifiGan,
    SpeechTaHifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


# Class attribute names restored to the identifiers PipelineTesterMixin looks
# up (pipeline_class/params/batch_params/required_optional_params); the
# obfuscated `__A` assignments clobbered each other and were never found.
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            # NOTE(review): boolean flag values in this method reconstructed
            # from upstream diffusers defaults (obfuscation replaced them with
            # an undefined name) — confirm against the original test file.
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechTaHifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_input_ids)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_input_ids)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    """Slow tests against the real `cvssp/audioldm` checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Return deterministic kwargs with fixed latents for the full checkpoint.

        NOTE(review): dtype default reconstructed as float32 (the obfuscated
        `torch.floataa` does not exist) — confirm against upstream.
        """
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
71
1
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class _snake_case(TestCase):
    """Tests for ``FeaturesManager.determine_framework``.

    NOTE(review): the original block subclassed the undefined name
    ``__SCREAMING_SNAKE_CASE`` and gave every method the same name
    (``UpperCamelCase__``), so only the last definition survived and the
    internal ``self._setup_*`` helper calls raised AttributeError.  The base
    class is restored to the imported ``TestCase`` and methods are renamed
    according to their call sites.
    """

    def setUp(self):
        # Attribute names are forced by later uses (self.test_model,
        # self.framework_pt, self.framework_tf).
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        """Save a PyTorch checkpoint of the test model under ``model_path``."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        """Save a TensorFlow checkpoint of the test model under ``model_path``."""
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        """A user-provided framework is always returned unchanged."""
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        """Without an explicit framework, detection follows the checkpoint format."""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint: neither framework's weights are present.
        # NOTE(review): the expected exception was the undefined `_snake_case`;
        # `determine_framework` raises EnvironmentError here -- confirm against
        # the installed transformers version.
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        """For a hub model name, detection falls back to the installed backends."""
        # TensorFlow not in environment -> use PyTorch.  The True/False mock
        # values below are forced by the assertions that follow.
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
71
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Lazy import structure: maps submodule name -> public names it provides.
# NOTE(review): the original block bound this dict to a throwaway name and then
# rebound that same name to the torch-only symbol list, so the
# `_import_structure` passed to `_LazyModule` below was undefined (NameError)
# and the torch symbols were lost.  Restored to the standard transformers
# lazy-module pattern, including installing the proxy into `sys.modules`.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

# Modeling code is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
1
'''simple docstring'''
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if ``matrix`` equals its own conjugate transpose.

    NOTE(review): the original block defined every function as ``a__`` (each
    definition shadowing the previous) while the call sites used
    ``is_hermitian`` / ``rayleigh_quotient`` / ``tests`` -- a guaranteed
    NameError.  Names are restored from the call sites.
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(matrix: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) of matrix ``matrix``.

    ``v`` is a column vector; the result is a 1x1 ndarray.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(matrix)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Exercise the helpers on a complex and a real Hermitian matrix."""
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
71
'''simple docstring'''
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of a minimum vertex cover.

    Repeatedly picks the vertex with the most uncovered edges until no edges
    remain.  ``graph`` maps each vertex to its adjacency list; the adjacency
    lists are mutated in place.  Returns the set of chosen vertices.

    NOTE(review): the original block was defined as ``a__`` but invoked as
    ``greedy_min_vertex_cover`` below, and every heap/set operation received
    the ``graph`` argument instead of the queue / popped vertex (TypeError on
    first call).  Restored so the documented algorithm actually runs.
    """
    queue: list[list] = []

    # heapq is a min-heap, so push the NEGATED degree to get max-degree-first.
    for key, value in graph.items():  # O(log(n)) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    chosen_vertices = set()

    # queue[0][0] is the (negated) rank of the current max-degree vertex;
    # stop once no vertex has uncovered edges left.
    while queue and queue[0][0] != 0:
        # Extract the vertex with the most uncovered edges.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax and update the other endpoints.
        for elem in queue:
            # Vertices with no remaining edges can be skipped.
            if elem[0] == 0:
                continue
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue after the rank updates.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
71
1
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class _snake_case(metaclass=DummyObject):
    """Placeholder object emitted when the `keras_nlp` backend is missing.

    NOTE(review): the metaclass was the undefined name
    ``__SCREAMING_SNAKE_CASE``; the imported ``DummyObject`` is the only
    metaclass in scope and is what this dummy-object pattern uses.  The
    original ``__init__`` also declared ``*_snake_case, **_snake_case`` --
    duplicate parameter names, a SyntaxError -- fixed with distinct names.
    """

    # Backends this dummy stands in for.  NOTE(review): attribute name kept
    # as-is; presumably the obfuscated form of `_backends` -- confirm against
    # DummyObject's contract before renaming.
    __A : str = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError naming the missing backend.
        requires_backends(self, ["keras_nlp"])
71
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

_lowerCamelCase = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce ``videos`` into batch form: a list of videos, each a list of frames.

    NOTE(review): the original defined this as ``a__`` while ``preprocess``
    below calls ``make_batched`` -- restored from the call site.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already a batch of videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video -> batch of one
    elif is_valid_image(videos):
        return [[videos]]  # a single frame -> batch of one single-frame video
    raise ValueError(f"Could not make batched video from {videos}")


class _snake_case(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (with optional
    offset to a symmetric range) and normalize each frame of each video.

    NOTE(review): the original class subclassed the undefined
    ``__SCREAMING_SNAKE_CASE`` (restored to the imported
    ``BaseImageProcessor``), declared every method as ``UpperCamelCase__``
    while the body calls ``self.resize`` / ``self.center_crop`` /
    ``self.rescale`` / ``self.normalize`` / ``self._preprocess_image``, and
    used duplicate ``_snake_case`` parameter names (a SyntaxError).  Names are
    restored from the internal call sites.
    """

    # NOTE(review): obfuscated attribute kept as-is; presumably
    # `model_input_names` -- confirm before renaming.
    __A : Tuple = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize a frame either to a shortest edge or to an exact height/width."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        # Plain-name `resize` resolves to the module-level transform, not self.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop a frame to an exact height/width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        """Rescale pixel values by ``scale``; with ``offset`` first subtract
        scale/2 so the output is shifted toward a symmetric range."""
        # NOTE(review): `floataa` in the obfuscated source is presumably
        # float32 (the usual image-processing working dtype) -- confirm.
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        """Apply the full transform pipeline to a single frame."""
        # NOTE(review): the original condition read
        # `do_resize and size is None or resample is None`, which (by Python
        # precedence) raises even when do_resize is False; parenthesized here.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos; returns a BatchFeature with
        ``pixel_values`` shaped (batch, frames, ...)."""
        # Fall back to the instance defaults for every unset option.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
71
1
'''simple docstring'''
from __future__ import annotations


class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic.

    NOTE(review): the original defined this class as ``_snake_case`` while the
    driver code below instantiates ``BoyerMooreSearch``; ``__init__`` declared
    two parameters with the same name (a SyntaxError) and all three methods
    shared one name.  Names are restored from the call sites.
    """

    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at ``current_pos``, or -1 for a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        """Search the pattern in the text; return all match start positions."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = """ABAABA"""
pattern = """AB"""
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("""No match found""")
else:
    print("""Pattern found in following positions: """)
    print(positions)
71
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Read JSON/JSON-Lines files into a Dataset via the Json builder.

    NOTE(review): in the original both classes in this file were named
    ``_snake_case`` (the second shadowed the first) and this one subclassed
    the undefined ``__SCREAMING_SNAKE_CASE``; the base is restored to the
    imported ``AbstractDatasetReader`` and the classes given distinct names.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize to {split: paths} so the builder always gets a mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming if requested, else map-style."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Write a Dataset to JSON-Lines (or other pandas `to_json` orients).

    NOTE(review): the original gave ``write``/``_batch_json``/``_write`` the
    same name while the body calls ``self._batch_json`` and ``self._write``;
    method names are restored from those internal call sites.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Serialize the dataset and return the number of bytes written."""
        # `path_or_buf` is given to __init__, not forwarded to pandas.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-Lines only makes sense for the "records" orient.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Serialize one batch of rows to encoded JSON bytes.

        ``args`` is a single tuple so this can be fed to ``Pool.imap``.
        """
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write the dataset batch-by-batch (optionally with a worker pool)."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
71
1
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=False ) -> int: """simple docstring""" UpperCAmelCase_ : List[str] = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): UpperCAmelCase_ : int = "segformer.encoder." + key if key.startswith("backbone" ): UpperCAmelCase_ : Tuple = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase_ : Optional[int] = key[key.find("patch_embed" ) + len("patch_embed" )] UpperCAmelCase_ : Optional[int] = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_SCREAMING_SNAKE_CASE )-1}''' ) if "norm" in key: UpperCAmelCase_ : List[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase_ : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] UpperCAmelCase_ : List[str] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_SCREAMING_SNAKE_CASE )-1}''' ) if "layer_norm1" in key: UpperCAmelCase_ : int = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: UpperCAmelCase_ : str = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase_ : Optional[int] = key[key.find("block" ) + len("block" )] UpperCAmelCase_ : Any = key.replace(F'''block{idx}''' , F'''block.{int(_SCREAMING_SNAKE_CASE )-1}''' ) if "attn.q" 
in key: UpperCAmelCase_ : List[str] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: UpperCAmelCase_ : Union[str, Any] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: UpperCAmelCase_ : Union[str, Any] = key.replace("attn" , "attention.self" ) if "fc1" in key: UpperCAmelCase_ : int = key.replace("fc1" , "dense1" ) if "fc2" in key: UpperCAmelCase_ : int = key.replace("fc2" , "dense2" ) if "linear_pred" in key: UpperCAmelCase_ : List[str] = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: UpperCAmelCase_ : Union[str, Any] = key.replace("linear_fuse.conv" , "linear_fuse" ) UpperCAmelCase_ : Dict = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase_ : List[str] = key[key.find("linear_c" ) + len("linear_c" )] UpperCAmelCase_ : List[Any] = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_SCREAMING_SNAKE_CASE )-1}''' ) if key.startswith("head" ): UpperCAmelCase_ : Dict = key.replace("head" , "classifier" ) UpperCAmelCase_ : str = value return new_state_dict def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase_ : int = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict UpperCAmelCase_ : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] UpperCAmelCase_ : Optional[int] = kv_bias[: config.hidden_sizes[i]] UpperCAmelCase_ : int = kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase_ : Tuple = kv_bias[ config.hidden_sizes[i] : ] def a__ ( ) -> int: """simple 
docstring""" UpperCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : int = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = SegformerConfig() UpperCAmelCase_ : Any = False # set attributes based on model_name UpperCAmelCase_ : Optional[Any] = "huggingface/label-files" if "segformer" in model_name: UpperCAmelCase_ : Optional[int] = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: UpperCAmelCase_ : List[str] = 1_50 UpperCAmelCase_ : List[Any] = "ade20k-id2label.json" UpperCAmelCase_ : str = (1, 1_50, 1_28, 1_28) elif "city" in model_name: UpperCAmelCase_ : Optional[int] = 19 UpperCAmelCase_ : Union[str, Any] = "cityscapes-id2label.json" UpperCAmelCase_ : List[Any] = (1, 19, 1_28, 1_28) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = model_name[4:6] UpperCAmelCase_ : Optional[int] = 10_00 UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : Optional[int] = (1, 10_00) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes UpperCAmelCase_ : Tuple = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = idalabel UpperCAmelCase_ : Any = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": UpperCAmelCase_ : Optional[Any] = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : Dict = 2_56 elif size == "b2": UpperCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : Union[str, Any] = 7_68 UpperCAmelCase_ : Union[str, 
Any] = [3, 4, 6, 3] elif size == "b3": UpperCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : int = 7_68 UpperCAmelCase_ : Optional[Any] = [3, 4, 18, 3] elif size == "b4": UpperCAmelCase_ : List[Any] = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : List[Any] = 7_68 UpperCAmelCase_ : Tuple = [3, 8, 27, 3] elif size == "b5": UpperCAmelCase_ : Dict = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : Optional[int] = 7_68 UpperCAmelCase_ : List[Any] = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) UpperCAmelCase_ : Dict = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_SCREAMING_SNAKE_CASE , align=_SCREAMING_SNAKE_CASE , do_random_crop=_SCREAMING_SNAKE_CASE ) # prepare image UpperCAmelCase_ : Tuple = prepare_img() UpperCAmelCase_ : int = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: UpperCAmelCase_ : List[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) ) else: UpperCAmelCase_ : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )["state_dict"] # rename keys UpperCAmelCase_ : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE , encoder_only=_SCREAMING_SNAKE_CASE ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict if encoder_only: UpperCAmelCase_ : Any = False UpperCAmelCase_ : Optional[Any] = SegformerForImageClassification(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Any = SegformerForSemanticSegmentation(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() # forward pass UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE ) 
UpperCAmelCase_ : List[str] = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": UpperCAmelCase_ : Any = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": UpperCAmelCase_ : Optional[Any] = torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": UpperCAmelCase_ : Tuple = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": UpperCAmelCase_ : Any = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": UpperCAmelCase_ : Tuple = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, 
-20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": UpperCAmelCase_ : List[Any] = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": UpperCAmelCase_ : Any = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": UpperCAmelCase_ : Union[str, Any] = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": UpperCAmelCase_ : Optional[Any] = torch.tensor( [ [ [-1.1372E01, -1.2787E01, -1.3477E01], [-1.2536E01, -1.4194E01, -1.4409E01], [-1.3217E01, -1.4888E01, -1.5327E01], ], [ [-1.4791E01, -1.7122E01, -1.8277E01], [-1.7163E01, -1.9192E01, -1.9533E01], [-1.7897E01, -1.9991E01, -2.0315E01], ], [ [7.6723E-01, 4.1921E-01, -7.7878E-02], [4.7772E-01, 9.5557E-03, -2.8082E-01], [3.6032E-01, -2.4826E-01, -5.1168E-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": UpperCAmelCase_ 
: List[str] = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": UpperCAmelCase_ : List[str] = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": UpperCAmelCase_ : Union[str, Any] = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": UpperCAmelCase_ : int = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": UpperCAmelCase_ : Optional[Any] = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 
0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": UpperCAmelCase_ : List[Any] = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: UpperCAmelCase_ : Any = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _lowerCamelCase = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
71
'''simple docstring''' from ..utils import DummyObject, requires_backends class _snake_case (metaclass=__SCREAMING_SNAKE_CASE): __A : Any =["speech"] def __init__( self ,*_snake_case ,**_snake_case ): requires_backends(self ,["speech"] ) class _snake_case (metaclass=__SCREAMING_SNAKE_CASE): __A : Dict =["speech"] def __init__( self ,*_snake_case ,**_snake_case ): requires_backends(self ,["speech"] )
71
1
'''simple docstring''' from __future__ import annotations import math class _snake_case : def __init__( self ,_snake_case ): UpperCAmelCase_ : Tuple = size # approximate the overall size of segment tree with given value UpperCAmelCase_ : str = [0 for i in range(0 ,4 * size )] # create array to store lazy update UpperCAmelCase_ : int = [0 for i in range(0 ,4 * size )] UpperCAmelCase_ : Union[str, Any] = [0 for i in range(0 ,4 * size )] # flag for lazy update def UpperCamelCase__ ( self ,_snake_case ): return idx * 2 def UpperCamelCase__ ( self ,_snake_case ): return idx * 2 + 1 def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): if left_element == right_element: UpperCAmelCase_ : Optional[Any] = a[left_element - 1] else: UpperCAmelCase_ : int = (left_element + right_element) // 2 self.build(self.left(_snake_case ) ,_snake_case ,_snake_case ,_snake_case ) self.build(self.right(_snake_case ) ,mid + 1 ,_snake_case ,_snake_case ) UpperCAmelCase_ : int = max( self.segment_tree[self.left(_snake_case )] ,self.segment_tree[self.right(_snake_case )] ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): if self.flag[idx] is True: UpperCAmelCase_ : Any = self.lazy[idx] UpperCAmelCase_ : Any = False if left_element != right_element: UpperCAmelCase_ : int = self.lazy[idx] UpperCAmelCase_ : Dict = self.lazy[idx] UpperCAmelCase_ : int = True UpperCAmelCase_ : Tuple = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCAmelCase_ : int = val if left_element != right_element: UpperCAmelCase_ : Dict = val UpperCAmelCase_ : Optional[Any] = val UpperCAmelCase_ : int = True UpperCAmelCase_ : List[Any] = True return True UpperCAmelCase_ : Any = (left_element + right_element) // 2 self.update(self.left(_snake_case ) ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) self.update(self.right(_snake_case ) ,mid + 1 ,_snake_case 
,_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = max( self.segment_tree[self.left(_snake_case )] ,self.segment_tree[self.right(_snake_case )] ) return True def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): if self.flag[idx] is True: UpperCAmelCase_ : Optional[int] = self.lazy[idx] UpperCAmelCase_ : Dict = False if left_element != right_element: UpperCAmelCase_ : Dict = self.lazy[idx] UpperCAmelCase_ : Dict = self.lazy[idx] UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : List[Any] = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCAmelCase_ : List[Any] = (left_element + right_element) // 2 UpperCAmelCase_ : List[str] = self.query(self.left(_snake_case ) ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = self.query(self.right(_snake_case ) ,mid + 1 ,_snake_case ,_snake_case ,_snake_case ) return max(_snake_case ,_snake_case ) def __str__( self ): return str([self.query(1 ,1 ,self.size ,_snake_case ,_snake_case ) for i in range(1 ,self.size + 1 )] ) if __name__ == "__main__": _lowerCamelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowerCamelCase = 15 _lowerCamelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
71
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ) -> tuple[float, float]: """simple docstring""" if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3: raise ValueError("Please enter a valid equation." ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("Both a & b of two equations can't be zero." ) # Extract the coefficients UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = equationa UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = equationa # Calculate the determinants of the matrices UpperCAmelCase_ : Optional[int] = aa * ba - aa * ba UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba UpperCAmelCase_ : Any = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("Infinite solutions. (Consistent system)" ) else: raise ValueError("No solution. (Inconsistent system)" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: UpperCAmelCase_ : Optional[int] = determinant_x / determinant UpperCAmelCase_ : List[Any] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
71
1
'''simple docstring''' from __future__ import annotations def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int | None = None , _SCREAMING_SNAKE_CASE : int | None = None ) -> None: """simple docstring""" if start is None: UpperCAmelCase_ : Union[str, Any] = 0 if end is None: UpperCAmelCase_ : List[str] = len(_SCREAMING_SNAKE_CASE ) - 1 if start >= end: return UpperCAmelCase_ : Optional[Any] = (start + end) // 2 slowsort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) slowsort(_SCREAMING_SNAKE_CASE , mid + 1 , _SCREAMING_SNAKE_CASE ) if sequence[end] < sequence[mid]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = sequence[mid], sequence[end] slowsort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
71
'''simple docstring''' from statistics import mean, stdev def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list: """simple docstring""" UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE ) # normalize data return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data] def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list: """simple docstring""" UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE ) # standardize data return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
71
1
'''Tests for the PLBart tokenizer (sentencepiece fixture + uclanlp/plbart-python-en_XX integration).'''
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Fairseq language-code ids in the "base" vocabulary.
EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        """Tokenize/encode/decode round-trips with the 'base' language-code set (4 extra codes)."""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        # Ids outside the sentencepiece vocab decode to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        # The 4 language/mask tokens occupy the last ids of the vocab.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        """Same round-trips with the 'multi' language-code set (7 extra codes)."""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # Suffix is always [eos, lang_code].
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seqaseq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
71
'''Accelerate example: GLUE/MRPC fine-tuning that survives OOM via `find_executable_batch_size`.'''
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Create the train/eval dataloaders for GLUE MRPC.

    Args:
        accelerator: the `Accelerator` object (drives tokenization ordering and padding choices).
        batch_size: train batch size.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the full training loop; retries with a smaller batch size on OOM."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before
        # the optimizer creation otherwise training will not work on TPU (`accelerate` will kindly throw an
        # error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we
        # gave them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI flags and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
71
1
"""Consolidate separately trained RAG question-encoder and generator checkpoints
into a single RAG checkpoint, saving the combined model and both tokenizers."""
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Build and save a combined RAG checkpoint under ``dest_dir``.

    Args:
        model_type: Either ``"rag_token"`` or ``"rag_sequence"``.
        generator_name_or_path: Identifier/path of the generator checkpoint.
        question_encoder_name_or_path: Identifier/path of the question-encoder checkpoint.
        dest_dir: Output directory for the consolidated checkpoint.
        config_name_or_path: Base RAG config; defaults to the matching facebook base config.
        generator_tokenizer_name_or_path: Defaults to ``generator_name_or_path``.
        question_encoder_tokenizer_name_or_path: Defaults to ``question_encoder_name_or_path``.
    """
    if config_name_or_path is None:
        # Pick the upstream base config that matches the requested model type.
        config_name_or_path = (
            "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
        )
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model: wire the two sub-model configs into the RAG config, then build.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check: the checkpoint we just wrote must load back cleanly.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers alongside the model.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
71
"""Prime factorization of an integer by trial division."""
from __future__ import annotations


def a__(n: int) -> list[int]:
    """Return the prime factors of ``n`` in non-decreasing order.

    >>> a__(12)
    [2, 2, 3]
    >>> a__(1)
    []
    >>> a__(97)
    [97]
    """
    divisor = 2
    factors: list[int] = []
    # Trial division: only candidates up to sqrt(n) need checking, because a
    # composite remainder would have a factor no larger than its square root.
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 1
        else:
            n //= divisor
            factors.append(divisor)
    if n > 1:
        # Whatever remains after stripping all small factors is itself prime.
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
"""Project Euler problem 43: sum of all 0-9 pandigital numbers that have the
sub-string divisibility property (d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, ...,
d8d9d10 % 17 == 0)."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple ``num`` satisfies all sub-string
    divisibility conditions of Project Euler 43."""
    # d2d3d4 divisible by 2 <=> its last digit d4 is even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 <=> digit sum d3+d4+d5 divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 <=> d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    # Remaining windows must divide 7, 11, 13, 17 respectively.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all pandigital numbers over the digits 0..n-1 that
    have the sub-string divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
71
"""Google BLEU (GLEU) metric, wrapping ``nltk.translate.gleu_score``."""
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
            Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
            and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results[\"google_bleu\"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results[\"google_bleu\"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Google BLEU (GLEU) metric, delegating to NLTK's corpus-level GLEU."""

    def _info(self) -> MetricInfo:
        # Features: tokenized predictions and lists of tokenized references.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        # Delegate the actual GLEU computation to NLTK.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
71
1
"""Tokenization tests for the CTRL tokenizer."""
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class _snake_case(TokenizerTesterMixin, unittest.TestCase):
    # Hooks consumed by TokenizerTesterMixin.
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Mixin hook: build a tokenizer from the temp-dir fixture files."""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Mixin hook: input text and expected round-trip output text."""
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE tokenization and token->id conversion over the toy vocab."""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
71
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCamelCase = logging.getLogger(__name__) @dataclass class _snake_case : __A : str =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."}) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."}) @dataclass class _snake_case : __A : str =field( metadata={"help": "The input data dir. 
Should contain the .tsv files (or other data files) for the task."}) __A : Optional[str] =field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __A : Optional[int] =field( default=10_24 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_28 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."}) __A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."}) __A : bool =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) ) def a__ ( ) -> Any: """simple docstring""" UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses() check_output_dir(_SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCAmelCase_ : Dict = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_SCREAMING_SNAKE_CASE ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCAmelCase_ : Dict = SeqaSeqDataset # Get datasets 
UpperCAmelCase_ : Tuple = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) UpperCAmelCase_ : Dict = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCAmelCase_ : int = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCAmelCase_ : Optional[Any] = ( build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None ) UpperCAmelCase_ : List[str] = SeqaSeqTrainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) UpperCAmelCase_ : List[Any] = {} # Training if training_args.do_train: logger.info("*** Train ***" ) UpperCAmelCase_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCAmelCase_ : int = train_result.metrics UpperCAmelCase_ : Dict = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" ) UpperCAmelCase_ : Optional[Any] = data_args.n_val UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) if training_args.do_predict: logger.info("*** Predict ***" ) UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" ) UpperCAmelCase_ : List[str] = test_output.metrics UpperCAmelCase_ : int = data_args.n_test if trainer.is_world_process_zero(): UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 ) handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate: UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE ) write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) ) return 
all_metrics def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
71
1
"""Greatest common divisor via Euclid's algorithm, recursive and iterative."""


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) using the recursive Euclidean algorithm.

    The result is always non-negative, even for negative inputs.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) using the iterative Euclidean algorithm."""
    while y:  # when y == 0 the loop terminates and x holds the final GCD
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers from stdin and print both GCDs."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        # Malformed input: wrong count, non-numeric, or empty entry.
        print("Wrong input")


if __name__ == "__main__":
    main()
71
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _snake_case : __A : Dict =BlenderbotConfig __A : Union[str, Any] ={} __A : Any ="gelu" def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,): UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Dict = seq_length UpperCAmelCase_ : int = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : str = eos_token_id UpperCAmelCase_ : List[Any] = pad_token_id UpperCAmelCase_ : List[Any] = bos_token_id def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase_ : 
int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case ) return config, inputs_dict def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder() UpperCAmelCase_ : int = inputs_dict["input_ids"] UpperCAmelCase_ : Dict = input_ids[:1, :] UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :] UpperCAmelCase_ : int = inputs_dict["head_mask"] UpperCAmelCase_ : Optional[int] = 1 # first forward pass UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 ) UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0] UpperCAmelCase_ : List[Any] = model(_snake_case 
,attention_mask=_snake_case ,past_key_values=_snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 ) def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]: """simple docstring""" if attention_mask is None: UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase_ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] 
=(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __A : Dict =( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __A : Any =True __A : Dict =False __A : Dict =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_snake_case ) @require_tokenizers @require_tf class _snake_case (unittest.TestCase): __A : Optional[int] =["My friends are cool but they eat too many carbs."] __A : Optional[Any] ="facebook/blenderbot-400M-distill" @cached_property def UpperCamelCase__ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" ) UpperCAmelCase_ : Union[str, Any] = self.model.generate( model_inputs.input_ids ,) UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
71
1
"""T5 model configuration.

Deobfuscated: the original collapsed text declared every ``__init__`` parameter
as ``_snake_case`` (duplicate argument names, a SyntaxError) and assigned
attributes to a throwaway ``UpperCAmelCase_`` name; real names are restored
from the values the body itself reads.
"""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoints -> hosted config files.
TA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class TaConfig(PretrainedConfig):
    """Configuration holding the hyper-parameters of a T5 encoder-decoder model.

    Defaults reproduce the ``t5-small`` architecture.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Default to a symmetric encoder/decoder when decoder depth is unspecified.
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        # Valid forms: exactly one part, or exactly two parts with "gated" first.
        if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export description for T5 seq2seq models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the exported graph's inputs.

        When ``use_past`` is set, decoder inputs shrink to a single new token and
        cached key/value tensors are appended via ``fill_with_past_key_values_``.
        """
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        # Minimum opset known to support the operators T5 export needs.
        return 13
71
'''simple docstring''' from numpy import exp, pi, sqrt def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> int: """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
71
1
'''simple docstring''' import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home _lowerCamelCase = HUGGINGFACE_HUB_CACHE _lowerCamelCase = """config.json""" _lowerCamelCase = """diffusion_pytorch_model.bin""" _lowerCamelCase = """diffusion_flax_model.msgpack""" _lowerCamelCase = """model.onnx""" _lowerCamelCase = """diffusion_pytorch_model.safetensors""" _lowerCamelCase = """weights.pb""" _lowerCamelCase = """https://huggingface.co""" _lowerCamelCase = default_cache_path _lowerCamelCase = """diffusers_modules""" _lowerCamelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules""")) _lowerCamelCase = ["""fp16""", """non-ema"""] _lowerCamelCase = """.self_attn"""
71
"""Dual-stream wrapper that runs two Transformer2D blocks over two slices of the
text conditioning and mixes their outputs.

Deobfuscated: the original collapsed text declared duplicate ``_snake_case``
parameters (a SyntaxError) and wrote results into a throwaway
``UpperCAmelCase_`` name; real names are restored from the attributes the body
itself reads (``self.mix_ratio``, ``self.condition_lengths``, ...).
"""
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class DualTransformeraDModel(nn.Module):
    """Two parallel ``TransformeraDModel`` instances whose outputs are linearly mixed.

    Each transformer attends over a different contiguous slice of
    ``encoder_hidden_states`` (see ``condition_lengths``).
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        # Two identically-configured transformers; which one sees which
        # condition slice is decided by `transformer_index_for_condition`.
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on their condition slices and blend the results.

        Returns a ``TransformeraDModelOutput`` (or a 1-tuple when
        ``return_dict`` is False) whose sample is the residual-mixed output.
        ``attention_mask`` is accepted but not used yet.
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            # return_dict=False so the call yields a tuple we can index with [0]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual each transformer adds on top of the input.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
71
1
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
71
'''simple docstring''' import json import sys def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f: UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[Any] = results[benchmark_name] UpperCAmelCase_ : Any = benchmark_name.split("/" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) UpperCAmelCase_ : Any = "| metric |" UpperCAmelCase_ : Any = "|--------|" UpperCAmelCase_ : Union[str, Any] = "| new / old (diff) |" for metric_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = benchmark_res[metric_name] UpperCAmelCase_ : Union[str, Any] = metric_vals["new"] UpperCAmelCase_ : Optional[Any] = metric_vals.get("old" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = F''' {new_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": _lowerCamelCase = sys.argv[1] _lowerCamelCase = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
71
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys _lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
71
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[int] =DebertaVaTokenizer __A : Union[str, Any] =DebertaVaTokenizerFast __A : str =True __A : List[str] =True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = "this is a test" UpperCAmelCase_ : Optional[Any] = "this is a test" return input_text, output_text def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = "<pad>" UpperCAmelCase_ : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"[PAD]" ) self.assertEqual(len(_snake_case ) ,3_00_01 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = "This is a test" UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89] UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case 
,_snake_case ) # fmt: off UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,) @slow def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 
4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
71
1
"""Fast (tiny-model, CPU) and slow (GPU integration) tests for the
Stable Diffusion 2 inpainting pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack so the pipeline runs fast on CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-image-latent + 1 mask channel
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/mask inputs for the given device."""
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # MPS does not support device-local generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 tolerance is much looser than the fp32 test above
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
71
"""Position of the most significant set bit of a non-negative integer."""


def a__(number: int) -> int:
    """Return the 1-based position of the highest set bit of *number*
    (i.e. its bit length); ``0`` has no set bits, so the result is ``0``.

    >>> a__(0)
    0
    >>> a__(1)
    1
    >>> a__(8)
    4

    Raises:
        TypeError: if *number* is not an ``int``.
        ValueError: if *number* is negative — right-shifting a negative int
            converges to ``-1``, so the original loop would never terminate.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
"""Fine-tuning library models for the multiple-choice SWAG task."""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data we feed the model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Validate file extensions eagerly so misconfiguration fails before any download.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads multiple-choice inputs.

    Flattens the (batch, num_choices) structure for the tokenizer's padder,
    then restores it and re-attaches the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    """Parse arguments, load SWAG (or user CSV/JSON), preprocess, train and evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either user-provided CSV/JSON files or the public swag dataset on the hub.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )

    # Load pretrained model and tokenizer.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context once per ending and pair it with "<question> <ending_i>".
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten back into groups of 4 choices
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def a__(index: int) -> None:
    """Entry point for TPU spawning (xla_spawn); `index` is the process index."""
    main()


if __name__ == "__main__":
    main()
71
"""n-choose-k via factorials, with a demo under __main__."""
from math import factorial


def a__(n: int, k: int) -> int:
    """Return the number of ways to choose *k* items from *n* (binomial coefficient).

    Raises:
        ValueError: if ``n < k`` or ``k < 0``.
    """
    # Original source gave both parameters the same obfuscated name (a
    # SyntaxError); restored to the `n`/`k` the body actually uses.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


# Public alias: the __main__ demo below refers to `combinations`.
combinations = a__

if __name__ == "__main__":
    print(
        """The number of five-card hands possible from a standard""",
        f"""fifty-two card deck is: {combinations(52, 5)}\n""",
    )

    print(
        """If a class of 40 students must be arranged into groups of""",
        f"""4 for group projects, there are {combinations(40, 4)} ways""",
        """to arrange them.\n""",
    )

    print(
        """If 10 teams are competing in a Formula One race, there""",
        f"""are {combinations(10, 3)} ways that first, second and""",
        """third place can be awarded.""",
    )
71
1
'''simple docstring''' import math def a__ ( _SCREAMING_SNAKE_CASE : int = 1_00 ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = sum(i * i for i in range(1 , n + 1 ) ) UpperCAmelCase_ : Optional[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f"""{solution() = }""")
71
"""Fast and slow tests for the video-to-video Stable Diffusion pipeline."""
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build a tiny 3D UNet/VAE/CLIP stack so the pipeline runs fast on CPU."""
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/video inputs for the given device."""
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
71
1
"""Tests for the Flax RegNet model and its image-classification head."""
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny RegNet configs/inputs and runs shape checks for the main test class."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w) — RegNet downsamples by a factor of 32 overall.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # NOTE(review): the source collapsed these three flags into anonymous
    # `False` attributes; restored per the upstream flax test conventions — confirm.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # one embedding output plus one per stage
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
71
'''simple docstring'''
# NOTE(review): machine-obfuscated test module for the `datasets` inspect API.
# Locals were renamed to `UpperCAmelCase_`, so later references to the
# original names (`script_name`, `info`, `infos`, `config_names`) are
# unresolved.  Tokens are preserved; only formatting and comments are added.
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

# Marker applied to tests that hit the Hub.
_lowerCamelCase = pytest.mark.integration


@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
    """simple docstring"""
    # inspect_dataset should copy the loading script into the target dir
    # without dragging the __pycache__ along.
    inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    UpperCAmelCase_ : Optional[Any] = path + ".py"
    assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
    assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
    """simple docstring"""
    # Same contract as inspect_dataset, but for (deprecated) metrics.
    inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    UpperCAmelCase_ : List[Any] = path + ".py"
    assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
    assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )


@pytest.mark.parametrize(
    "path, config_name, expected_splits" ,
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ] ,
)
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
    """simple docstring"""
    # get_dataset_config_info returns the info for exactly the requested config.
    UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception" ,
    [
        ("paws", None, ValueError),
    ] ,
)
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
    """simple docstring"""
    # Requesting a multi-config dataset without a config name must raise.
    with pytest.raises(_SCREAMING_SNAKE_CASE ):
        get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )


@pytest.mark.parametrize(
    "path, expected" ,
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ] ,
)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
    """simple docstring"""
    UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config" ,
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ] ,
)
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
    """simple docstring"""
    # get_dataset_infos maps config name -> info; check keys and the first
    # config's splits.
    UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
    assert list(infos.keys() ) == expected_configs
    UpperCAmelCase_ : Optional[Any] = expected_configs[0]
    assert expected_config in infos
    UpperCAmelCase_ : Dict = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits" ,
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ] ,
)
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
    """simple docstring"""
    UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
    assert expected_config in infos
    UpperCAmelCase_ : Dict = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception" ,
    [
        ("paws", None, ValueError),
    ] ,
)
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
    """simple docstring"""
    # Same missing-config error path, via get_dataset_split_names.
    with pytest.raises(_SCREAMING_SNAKE_CASE ):
        get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
71
1
'''simple docstring'''
# NOTE(review): machine-obfuscated smoke tests for the digital_image_processing
# package.  Locals are renamed to `UpperCAmelCase_`, so later references to
# the original names are unresolved; tokens are preserved as-is.
# `cva` is presumably OpenCV (`cv2`) and `uinta` numpy's `uint8` after the
# same renaming pass — TODO confirm against the original repository.
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the small Lena image and its grayscale version.
_lowerCamelCase = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
_lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)


def a__ ( ) -> Any:
    """simple docstring"""
    UpperCAmelCase_ : Optional[Any] = cn.convert_to_negative(_SCREAMING_SNAKE_CASE )
    # assert negative_img array for at least one True
    assert negative_img.any()


def a__ ( ) -> Dict:
    """simple docstring"""
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_SCREAMING_SNAKE_CASE , 1_10 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )


def a__ ( ) -> Optional[int]:
    """simple docstring"""
    UpperCAmelCase_ : Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def a__ ( ) -> str:
    """simple docstring"""
    UpperCAmelCase_ : Any = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    UpperCAmelCase_ : Optional[int] = canny.canny(_SCREAMING_SNAKE_CASE )
    # assert canny array for at least one True
    assert canny_array.any()


def a__ ( ) -> List[Any]:
    """simple docstring"""
    # Gaussian blur should produce a fully populated array.
    assert gg.gaussian_filter(_SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all()


def a__ ( ) -> str:
    """simple docstring"""
    # Laplace-style kernel convolution smoke test.
    UpperCAmelCase_ : List[str] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    UpperCAmelCase_ : Any = conv.img_convolve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
    assert res.any()


def a__ ( ) -> Tuple:
    """simple docstring"""
    assert med.median_filter(_SCREAMING_SNAKE_CASE , 3 ).any()


def a__ ( ) -> Dict:
    """simple docstring"""
    # Sobel returns (gradient magnitude, gradient direction).
    UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = sob.sobel_filter(_SCREAMING_SNAKE_CASE )
    assert grad.any() and theta.any()


def a__ ( ) -> str:
    """simple docstring"""
    UpperCAmelCase_ : Union[str, Any] = sp.make_sepia(_SCREAMING_SNAKE_CASE , 20 )
    assert sepia.all()


def a__ ( _SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]:
    """simple docstring"""
    # Burkes dithering end-to-end run.
    UpperCAmelCase_ : List[str] = bs.Burkes(imread(_SCREAMING_SNAKE_CASE , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()


def a__ ( _SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[int]:
    """simple docstring"""
    # Nearest-neighbour resize end-to-end run.
    UpperCAmelCase_ : str = rs.NearestNeighbour(imread(_SCREAMING_SNAKE_CASE , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()


def a__ ( ) -> str:
    """simple docstring"""
    UpperCAmelCase_ : List[Any] = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    UpperCAmelCase_ : Optional[int] = imread(_SCREAMING_SNAKE_CASE , 0 )
    # Test for get_neighbors_pixel function() return not None
    UpperCAmelCase_ : List[str] = 0
    UpperCAmelCase_ : str = 0
    UpperCAmelCase_ : Any = image[x_coordinate][y_coordinate]
    UpperCAmelCase_ : Optional[Any] = lbp.get_neighbors_pixel(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    UpperCAmelCase_ : int = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            UpperCAmelCase_ : List[str] = lbp.local_binary_value(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    assert lbp_image.any()
71
'''simple docstring'''
# NOTE(review): machine-obfuscated CLIPProcessor test suite.  Every test
# method was renamed to `UpperCamelCase__` (legal but each redefinition
# shadows the previous) and every local to `UpperCAmelCase_`; tokens are
# preserved, only formatting and comments are added.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image
    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class _snake_case (unittest.TestCase):
    def UpperCamelCase__ ( self ):
        # setUp-equivalent: write a tiny BPE vocab/merges pair and an image
        # processor config into a temp dir so components can be loaded from disk.
        UpperCAmelCase_ : Dict = tempfile.mkdtemp()
        # fmt: off
        UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
        UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
        UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(_snake_case ) + "\n" )
        with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write("\n".join(_snake_case ) )
        UpperCAmelCase_ : Optional[Any] = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
        with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
            json.dump(_snake_case ,_snake_case )

    def UpperCamelCase__ ( self ,**_snake_case ):
        # Slow tokenizer loaded from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )

    def UpperCamelCase__ ( self ,**_snake_case ):
        # Fast (Rust) tokenizer loaded from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )

    def UpperCamelCase__ ( self ,**_snake_case ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )

    def UpperCamelCase__ ( self ):
        # tearDown-equivalent.
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase__ ( self ):
        # One random 3x30x400 uint8 image converted to PIL.
        UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase__ ( self ):
        # save_pretrained / from_pretrained round-trip for slow and fast
        # tokenizer variants.
        UpperCAmelCase_ : Tuple = self.get_tokenizer()
        UpperCAmelCase_ : str = self.get_rust_tokenizer()
        UpperCAmelCase_ : List[str] = self.get_image_processor()
        UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        processor_slow.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
        UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        processor_fast.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
        self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
        self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor ,_snake_case )
        self.assertIsInstance(processor_fast.image_processor ,_snake_case )

    def UpperCamelCase__ ( self ):
        # from_pretrained kwargs (new special tokens, image-processor
        # overrides) must be applied on top of the saved processor.
        UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
        UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
        UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
            self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,_snake_case )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,_snake_case )

    def UpperCamelCase__ ( self ):
        # Processor(images=...) must match the underlying image processor.
        UpperCAmelCase_ : List[str] = self.get_image_processor()
        UpperCAmelCase_ : Dict = self.get_tokenizer()
        UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        UpperCAmelCase_ : Any = self.prepare_image_inputs()
        UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
        UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )

    def UpperCamelCase__ ( self ):
        # Processor(text=...) must match the underlying tokenizer.
        UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
        UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
        UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        UpperCAmelCase_ : Tuple = "lower newer"
        UpperCAmelCase_ : Any = processor(text=_snake_case )
        UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )

    def UpperCamelCase__ ( self ):
        # Text + images yields all three model inputs; no input at all raises.
        UpperCAmelCase_ : str = self.get_image_processor()
        UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
        UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        UpperCAmelCase_ : Any = "lower newer"
        UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
        UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
        self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(_snake_case ):
            processor()

    def UpperCamelCase__ ( self ):
        # batch_decode must delegate to the tokenizer.
        UpperCAmelCase_ : str = self.get_image_processor()
        UpperCAmelCase_ : Dict = self.get_tokenizer()
        UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
        UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
        self.assertListEqual(_snake_case ,_snake_case )

    def UpperCamelCase__ ( self ):
        # The processor's output keys must match model_input_names.
        UpperCAmelCase_ : Dict = self.get_image_processor()
        UpperCAmelCase_ : int = self.get_tokenizer()
        UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
        UpperCAmelCase_ : Optional[int] = "lower newer"
        UpperCAmelCase_ : Any = self.prepare_image_inputs()
        UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
        self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
71
1
"""Selection sort: sort a list in place with O(n^2) comparisons."""


def selection_sort(collection: list) -> list:
    """Sort *collection* in place using selection sort and return it.

    Repeatedly finds the minimum of the unsorted tail and swaps it into
    position. O(n^2) comparisons, O(1) extra space; handles empty and
    single-element lists trivially.

    :param collection: mutable list of mutually comparable items.
    :return: the same list object, sorted in ascending order.

    >>> selection_sort([3, 1, 2])
    [1, 2, 3]
    """
    # NOTE(review): the previous version was broken by an obfuscation pass —
    # `length`, `least` and the swap result were never actually assigned and
    # the entry point called an undefined `selection_sort`.  This restores the
    # working algorithm under the name the __main__ block uses.
    length = len(collection)
    for i in range(length - 1):
        least = i  # index of the smallest element seen in collection[i:]
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:  # avoid a no-op swap
            collection[i], collection[least] = collection[least], collection[i]
    return collection


# Backward-compatible alias for the old (obfuscated) public name.
a__ = selection_sort


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
71
'''simple docstring'''
# NOTE(review): machine-obfuscated AudioLDM pipeline test suite.  Parameters
# are all `_snake_case` and locals `UpperCAmelCase_`, so references such as
# `unet`, `scheduler`, `audio`, `inputs` are unresolved original names.
# Tokens are preserved; only formatting and comments are added.
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F

from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechTaHifiGan,
    SpeechTaHifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
    # Fast (tiny-model) AudioLDM pipeline tests.
    __A : Any =AudioLDMPipeline
    __A : Dict =TEXT_TO_AUDIO_PARAMS
    __A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
    __A : Tuple =frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ])

    def UpperCamelCase__ ( self ):
        # Build the full set of tiny pipeline components (UNet, scheduler,
        # VAE, CLAP text encoder, tokenizer, HiFi-GAN vocoder).
        torch.manual_seed(0 )
        UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
        UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
        torch.manual_seed(0 )
        UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
        torch.manual_seed(0 )
        UpperCAmelCase_ : Optional[int] = ClapTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
        UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
        UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
        UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig(
            model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
        UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case )
        UpperCAmelCase_ : Union[str, Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
        # Deterministic pipeline kwargs; mps needs a default-device generator.
        if str(_snake_case ).startswith("mps" ):
            UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
        else:
            UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        UpperCAmelCase_ : Any = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def UpperCamelCase__ ( self ):
        # Golden-value check of the first 10 waveform samples.
        UpperCAmelCase_ : int = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ : str = self.get_dummy_components()
        UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : Dict = output.audios[0]
        assert audio.ndim == 1
        assert len(_snake_case ) == 2_56
        UpperCAmelCase_ : Any = audio[:10]
        UpperCAmelCase_ : Any = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2

    def UpperCamelCase__ ( self ):
        # Passing precomputed prompt_embeds must match passing the raw prompt.
        UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
        UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
        UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
        # forward
        UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : List[str] = output.audios[0]
        UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
        UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
            _snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
        UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
        UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
            _snake_case ,)
        UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
        UpperCAmelCase_ : int = prompt_embeds
        # forward
        UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : List[Any] = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2

    def UpperCamelCase__ ( self ):
        # Same equivalence check, but with an explicit negative prompt.
        UpperCAmelCase_ : List[Any] = self.get_dummy_components()
        UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
        UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
        UpperCAmelCase_ : Any = negative_prompt
        UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
        # forward
        UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : Dict = output.audios[0]
        UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
        UpperCAmelCase_ : List[Any] = []
        for p in [prompt, negative_prompt]:
            UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
                _snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
            UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
            UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
                _snake_case ,)
            UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
            embeds.append(_snake_case )
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
        # forward
        UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : Any = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2

    def UpperCamelCase__ ( self ):
        # Negative-prompt run against golden values, with a PNDM scheduler.
        UpperCAmelCase_ : Optional[int] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
        UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
        UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : int = "egg cracking"
        UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
        UpperCAmelCase_ : int = output.audios[0]
        assert audio.ndim == 1
        assert len(_snake_case ) == 2_56
        UpperCAmelCase_ : List[Any] = audio[:10]
        UpperCAmelCase_ : Any = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2

    def UpperCamelCase__ ( self ):
        # num_waveforms_per_prompt must multiply the batch dimension.
        UpperCAmelCase_ : Optional[int] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ : List[str] = self.get_dummy_components()
        UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
        UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
        assert audios.shape == (1, 2_56)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        UpperCAmelCase_ : List[str] = 2
        UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 2_56)
        # test num_waveforms_per_prompt for single prompt
        UpperCAmelCase_ : List[str] = 2
        UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
        assert audios.shape == (num_waveforms_per_prompt, 2_56)
        # test num_waveforms_per_prompt for batch of prompts
        UpperCAmelCase_ : Union[str, Any] = 2
        UpperCAmelCase_ : Optional[int] = audioldm_pipe(
            [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)

    def UpperCamelCase__ ( self ):
        # audio_length_in_s controls the output duration exactly.
        UpperCAmelCase_ : List[str] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
        UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
        UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
        UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
        UpperCAmelCase_ : str = output.audios[0]
        assert audio.ndim == 1
        assert len(_snake_case ) / vocoder_sampling_rate == 0.016
        UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
        UpperCAmelCase_ : Any = output.audios[0]
        assert audio.ndim == 1
        assert len(_snake_case ) / vocoder_sampling_rate == 0.032

    def UpperCamelCase__ ( self ):
        # Swapping in a vocoder with doubled mel channels must keep the
        # waveform shape unchanged.
        UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
        UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : int = ["hey"]
        UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
        UpperCAmelCase_ : Any = output.audios.shape
        assert audio_shape == (1, 2_56)
        UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
        UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
        UpperCAmelCase_ : int = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 2_56)

    def UpperCamelCase__ ( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )

    def UpperCamelCase__ ( self ):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
    def UpperCamelCase__ ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )


@slow
class _snake_case (unittest.TestCase):
    # Slow integration tests against the pretrained cvssp/audioldm checkpoint.
    def UpperCamelCase__ ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
        # Fixed latents + generator so the integration outputs are reproducible.
        UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
        UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
        UpperCAmelCase_ : List[str] = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def UpperCamelCase__ ( self ):
        # Full pipeline with the default scheduler, golden-value check.
        UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
        UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
        UpperCAmelCase_ : List[Any] = 25
        UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
        assert audio.ndim == 1
        assert len(_snake_case ) == 8_19_20
        UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
        UpperCAmelCase_ : Any = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2

    def UpperCamelCase__ ( self ):
        # Same pipeline with an LMS discrete scheduler, golden-value check.
        UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
        UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
        audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
        UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
        assert audio.ndim == 1
        assert len(_snake_case ) == 8_19_20
        UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
        UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
71
1
'''Tokenization tests for the MVP tokenizer (slow and fast, RoBERTa-style BPE).

NOTE(review): this file has been machine-renamed -- every test method is
called ``UpperCamelCase__`` (later defs shadow earlier ones) and most local
reads were rewritten to ``_snake_case``/``kwargs``/``batch`` names that are
never bound in their scope.  The comments below describe the intended
behaviour; the names must be restored before these tests can actually run.
'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class _snake_case(__SCREAMING_SNAKE_CASE, unittest.TestCase):
    # Tokenizer classes exercised by the shared tester mixin
    # (obfuscated above as __SCREAMING_SNAKE_CASE).
    __A: str = MvpTokenizer
    __A: Optional[Any] = MvpTokenizerFast
    __A: Optional[int] = True
    __A: int = filter_roberta_detectors

    def UpperCamelCase__(self):
        # Write a tiny BPE vocab + merges file into tmpdirname so tokenizers
        # can be instantiated without network access.
        super().setUp()
        UpperCAmelCase_: Optional[int] = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        # token -> id map.  NOTE(review): `_snake_case` is unbound here --
        # presumably it was the vocab list above before the rename.
        UpperCAmelCase_: Tuple = dict(zip(_snake_case, range(len(_snake_case))))
        UpperCAmelCase_: Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        UpperCAmelCase_: Tuple = {"unk_token": "<unk>"}
        UpperCAmelCase_: Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        UpperCAmelCase_: List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(_snake_case) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(_snake_case))

    def UpperCamelCase__(self, **_snake_case):
        # Build the slow tokenizer from the fixture files written in setUp.
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **_snake_case)

    def UpperCamelCase__(self, **_snake_case):
        # Same, for the Rust-backed (fast) tokenizer.
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **_snake_case)

    def UpperCamelCase__(self, _snake_case):
        # (input text, expected decoded text) pair used by the common tests.
        return "lower newer", "lower newer"

    @cached_property
    def UpperCamelCase__(self):
        # Pretrained slow tokenizer used by the integration-style tests below.
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def UpperCamelCase__(self):
        # Pretrained fast tokenizer used by the integration-style tests below.
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def UpperCamelCase__(self):
        # Batched encoding returns pt tensors of the expected shape and ids.
        UpperCAmelCase_: str = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase_: Union[str, Any] = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_: Optional[int] = tokenizer(
                _snake_case, max_length=len(_snake_case), padding=_snake_case, return_tensors="pt"
            )
            self.assertIsInstance(_snake_case, _snake_case)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            UpperCAmelCase_: int = batch.input_ids.tolist()[0]
            self.assertListEqual(_snake_case, _snake_case)
            # Test that special tokens are reset

    @require_torch
    def UpperCamelCase__(self):
        # Encoding without targets must not produce label tensors.
        UpperCAmelCase_: Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_: int = tokenizer(_snake_case, padding=_snake_case, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", _snake_case)
            self.assertIn("attention_mask", _snake_case)
            self.assertNotIn("labels", _snake_case)
            self.assertNotIn("decoder_attention_mask", _snake_case)

    @require_torch
    def UpperCamelCase__(self):
        # Target-side encoding honours max_length padding.
        UpperCAmelCase_: Tuple = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_: Optional[Any] = tokenizer(
                text_target=_snake_case, max_length=32, padding="max_length", return_tensors="pt"
            )
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def UpperCamelCase__(self):
        # Over-long inputs are truncated to the model max length (1024).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_: Tuple = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"],
                padding=_snake_case,
                truncation=_snake_case,
                return_tensors="pt",
            )
            self.assertIsInstance(_snake_case, _snake_case)
            self.assertEqual(batch.input_ids.shape, (2, 10_24))

    @require_torch
    def UpperCamelCase__(self):
        # text + text_target in one call: both sides get BOS/EOS framing.
        UpperCAmelCase_: Union[str, Any] = ["A long paragraph for summarization."]
        UpperCAmelCase_: Union[str, Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_: Union[str, Any] = tokenizer(_snake_case, text_target=_snake_case, return_tensors="pt")
            UpperCAmelCase_: Tuple = inputs["input_ids"]
            UpperCAmelCase_: Any = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def UpperCamelCase__(self):
        # Intentionally disabled common test.
        pass

    def UpperCamelCase__(self):
        # Slow and fast tokenizers must agree on a sentence containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                UpperCAmelCase_: str = self.rust_tokenizer_class.from_pretrained(_snake_case, **_snake_case)
                UpperCAmelCase_: int = self.tokenizer_class.from_pretrained(_snake_case, **_snake_case)
                UpperCAmelCase_: str = "A, <mask> AllenNLP sentence."
                UpperCAmelCase_: List[str] = tokenizer_r.encode_plus(
                    _snake_case, add_special_tokens=_snake_case, return_token_type_ids=_snake_case
                )
                UpperCAmelCase_: Union[str, Any] = tokenizer_p.encode_plus(
                    _snake_case, add_special_tokens=_snake_case, return_token_type_ids=_snake_case
                )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                UpperCAmelCase_: List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                UpperCAmelCase_: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(
                    _snake_case, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    _snake_case, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
71
'''Lazy import structure for the GPTNeoX-Japanese model (config, tokenizer, torch modeling).'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# BUG FIX: this dict was previously bound to a generated name while the
# _LazyModule call at the bottom referenced the undefined `_import_structure`,
# so importing the package raised NameError.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: the modeling exports used to overwrite the whole structure
    # variable instead of adding a "modeling_gpt_neox_japanese" entry.
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # BUG FIX: install the lazy module as this module so attribute access
    # triggers on-demand imports (it was previously assigned to a throwaway
    # local name, which disabled the lazy-loading mechanism entirely).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
1
'''Google BLEU (GLEU) metric: a thin `datasets.Metric` wrapper around nltk's corpus_gleu.'''
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo

# BUG FIX: the three module strings were all bound to one generated name
# (`_lowerCamelCase`), so `_CITATION`/`_DESCRIPTION`/`_KWARGS_DESCRIPTION`
# referenced below were undefined (NameError).  They are now bound to the
# names the rest of the module actually uses.
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...    'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...    'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...    'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...    'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...    'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...    'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...    'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...    'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...    'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...    'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...    'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...    'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results[\"google_bleu\"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...    'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...    'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...    'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...    'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...    'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results[\"google_bleu\"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Corpus-level Google BLEU metric (min of aggregate n-gram precision and recall)."""

    def _info(self) -> MetricInfo:
        # Metric metadata plus the feature schema expected by ``compute``.
        # BUG FIX: both methods used to share one generated name, so this one
        # was shadowed; ``datasets.Metric`` requires ``_info``/``_compute``.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        """Return ``{"google_bleu": score}`` for tokenized hypotheses vs. references.

        BUG FIX: the previous signature declared four parameters all named
        ``_snake_case`` -- a duplicate-argument SyntaxError.
        """
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }

    # Backward-compatible alias for the previous (generated) method name.
    UpperCamelCase__ = _compute
71
'''Greedy approximation of a minimum vertex cover using a max-heap of vertex degrees.'''
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Return an approximate minimum vertex cover of ``graph``.

    Greedy strategy: repeatedly pick the vertex with the highest remaining
    degree, add it to the cover, and delete its incident edges until no
    uncovered edge remains.

    Args:
        graph: adjacency list mapping each vertex to the list of its neighbours.

    Returns:
        The set of chosen cover vertices (empty for an edgeless/empty graph).

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # heapq is a min-heap, so push -degree as the rank to pop the
    # max-degree vertex first.
    for key, value in graph.items():  # O(log(n)) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    chosen_vertices = set()

    # queue[0][0] is the (negated) rank of the highest-degree remaining
    # vertex; rank 0 means no vertex has an uncovered incident edge left.
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank and add it to the cover.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all edges adjacent to argmax and update the ranks.
        for elem in queue:
            if elem[0] == 0:  # vertex already has no remaining neighbours
                continue
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1  # one edge fewer -> negated rank moves toward 0
        # Restore the heap invariant after the in-place rank updates.
        heapq.heapify(queue)
    return chosen_vertices


# Backward-compatible alias for the previous (generated) function name.
a__ = greedy_min_vertex_cover


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # BUG FIX: this demo previously crashed with NameError twice -- the
    # function was defined as `a__` and the graph was bound to another
    # generated name, while the lines below used `greedy_min_vertex_cover`
    # and `graph`.
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
71
1
'''Model tests for DeiT: config sanity, forward-shape checks, training smoke tests,
and slow end-to-end integration checks.

NOTE(review): this file has been machine-renamed -- every method is called
``UpperCamelCase__`` (later defs shadow earlier ones), locals are collapsed to
``UpperCAmelCase_`` and most reads were rewritten to ``_snake_case`` or other
unbound names.  The comments below document the intended behaviour; the names
must be restored before these tests can run.
'''
import inspect
import unittest
import warnings

from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class _snake_case:
    # Builds small random DeiT configs and inputs for the unit tests below.
    def __init__(
        self,
        _snake_case,
        _snake_case=13,
        _snake_case=30,
        _snake_case=2,
        _snake_case=3,
        _snake_case=True,
        _snake_case=True,
        _snake_case=32,
        _snake_case=5,
        _snake_case=4,
        _snake_case=37,
        _snake_case="gelu",
        _snake_case=0.1,
        _snake_case=0.1,
        _snake_case=10,
        _snake_case=0.02,
        _snake_case=3,
        _snake_case=None,
        _snake_case=2,
    ):
        # NOTE(review): right-hand names (parent, batch_size, ...) are the
        # pre-obfuscation parameter names and are unbound here.
        UpperCAmelCase_: Any = parent
        UpperCAmelCase_: Optional[Any] = batch_size
        UpperCAmelCase_: List[Any] = image_size
        UpperCAmelCase_: Dict = patch_size
        UpperCAmelCase_: List[str] = num_channels
        UpperCAmelCase_: List[Any] = is_training
        UpperCAmelCase_: Tuple = use_labels
        UpperCAmelCase_: Dict = hidden_size
        UpperCAmelCase_: Dict = num_hidden_layers
        UpperCAmelCase_: Any = num_attention_heads
        UpperCAmelCase_: Union[str, Any] = intermediate_size
        UpperCAmelCase_: Tuple = hidden_act
        UpperCAmelCase_: Optional[int] = hidden_dropout_prob
        UpperCAmelCase_: Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase_: str = type_sequence_label_size
        UpperCAmelCase_: str = initializer_range
        UpperCAmelCase_: Dict = scope
        UpperCAmelCase_: str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        UpperCAmelCase_: List[str] = (image_size // patch_size) ** 2
        UpperCAmelCase_: Union[str, Any] = num_patches + 2

    def UpperCamelCase__(self):
        # Random pixel_values (+ labels when use_labels) and a fresh config.
        UpperCAmelCase_: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        UpperCAmelCase_: int = None
        if self.use_labels:
            UpperCAmelCase_: Any = ids_tensor([self.batch_size], self.type_sequence_label_size)
        UpperCAmelCase_: int = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase__(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=_snake_case,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def UpperCamelCase__(self, _snake_case, _snake_case, _snake_case):
        # Base model: last_hidden_state must be (batch, seq_len, hidden).
        UpperCAmelCase_: List[str] = DeiTModel(config=_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_: Dict = model(_snake_case)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCamelCase__(self, _snake_case, _snake_case, _snake_case):
        # Masked-image-modeling head reconstructs the full image; also checked
        # with greyscale (1-channel) inputs.
        UpperCAmelCase_: int = DeiTForMaskedImageModeling(config=_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_: Dict = model(_snake_case)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        UpperCAmelCase_: Any = 1
        UpperCAmelCase_: int = DeiTForMaskedImageModeling(_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_: int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        UpperCAmelCase_: Tuple = model(_snake_case)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def UpperCamelCase__(self, _snake_case, _snake_case, _snake_case):
        # Classification head: logits are (batch, num_labels); also checked
        # with greyscale (1-channel) inputs.
        UpperCAmelCase_: Optional[int] = self.type_sequence_label_size
        UpperCAmelCase_: List[Any] = DeiTForImageClassification(_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_: Dict = model(_snake_case, labels=_snake_case)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        UpperCAmelCase_: int = 1
        UpperCAmelCase_: Tuple = DeiTForImageClassification(_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_: str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        UpperCAmelCase_: int = model(_snake_case, labels=_snake_case)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def UpperCamelCase__(self):
        # Pack config + inputs into the dict shape the common tests expect.
        UpperCAmelCase_: str = self.prepare_config_and_inputs()
        (
            (UpperCAmelCase_),
            (UpperCAmelCase_),
            (UpperCAmelCase_),
        ): List[str] = config_and_inputs
        UpperCAmelCase_: Any = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class _snake_case(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase):
    # All DeiT heads under test (only when torch is available).
    __A: Optional[Any] = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    __A: str = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    __A: Optional[int] = False
    __A: Optional[Any] = False
    __A: List[str] = False

    def UpperCamelCase__(self):
        UpperCAmelCase_: int = DeiTModelTester(self)
        UpperCAmelCase_: Union[str, Any] = ConfigTester(self, config_class=_snake_case, has_text_modality=_snake_case, hidden_size=37)

    def UpperCamelCase__(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def UpperCamelCase__(self):
        pass

    def UpperCamelCase__(self):
        # Input embeddings must be an nn.Module; output embeddings absent or Linear.
        UpperCAmelCase_, UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_: Any = model_class(_snake_case)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            UpperCAmelCase_: Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_snake_case, nn.Linear))

    def UpperCamelCase__(self):
        # forward() signature must start with pixel_values.
        UpperCAmelCase_, UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_: Tuple = model_class(_snake_case)
            UpperCAmelCase_: Dict = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_: List[Any] = [*signature.parameters.keys()]
            UpperCAmelCase_: int = ["pixel_values"]
            self.assertListEqual(arg_names[:1], _snake_case)

    def UpperCamelCase__(self):
        UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_snake_case)

    def UpperCamelCase__(self):
        UpperCAmelCase_: Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case)

    def UpperCamelCase__(self):
        UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_snake_case)

    def UpperCamelCase__(self, _snake_case, _snake_case, _snake_case=False):
        # The teacher-distilled classifier is inference-only, so drop labels.
        UpperCAmelCase_: str = super()._prepare_for_class(_snake_case, _snake_case, return_labels=_snake_case)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def UpperCamelCase__(self):
        # Training smoke test: loss must backpropagate for trainable heads.
        if not self.model_tester.is_training:
            return
        UpperCAmelCase_, UpperCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_: int = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(_snake_case)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            UpperCAmelCase_: Dict = model_class(_snake_case)
            model.to(_snake_case)
            model.train()
            UpperCAmelCase_: Any = self._prepare_for_class(_snake_case, _snake_case, return_labels=_snake_case)
            UpperCAmelCase_: Union[str, Any] = model(**_snake_case).loss
            loss.backward()

    def UpperCamelCase__(self):
        # Same as above, with gradient checkpointing enabled.
        UpperCAmelCase_, UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        UpperCAmelCase_: Union[str, Any] = False
        UpperCAmelCase_: Union[str, Any] = True
        for model_class in self.all_model_classes:
            if model_class in get_values(_snake_case) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            UpperCAmelCase_: Optional[Any] = model_class(_snake_case)
            model.gradient_checkpointing_enable()
            model.to(_snake_case)
            model.train()
            UpperCAmelCase_: Union[str, Any] = self._prepare_for_class(_snake_case, _snake_case, return_labels=_snake_case)
            UpperCAmelCase_: Dict = model(**_snake_case).loss
            loss.backward()

    def UpperCamelCase__(self):
        # Loss computation for each supported problem_type must not emit the
        # PyTorch target-size broadcasting warning.
        UpperCAmelCase_, UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_: Any = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(_snake_case),
                    *get_values(_snake_case),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}'''):
                    UpperCAmelCase_: Tuple = problem_type["title"]
                    UpperCAmelCase_: Dict = problem_type["num_labels"]
                    UpperCAmelCase_: int = model_class(_snake_case)
                    model.to(_snake_case)
                    model.train()
                    UpperCAmelCase_: List[Any] = self._prepare_for_class(_snake_case, _snake_case, return_labels=_snake_case)
                    if problem_type["num_labels"] > 1:
                        UpperCAmelCase_: Tuple = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    UpperCAmelCase_: Optional[Any] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=_snake_case) as warning_list:
                        UpperCAmelCase_: Dict = model(**_snake_case).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}'''
                            )
                    loss.backward()

    @slow
    def UpperCamelCase__(self):
        # Checkpoint loading smoke test (first archive entry only).
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_: List[str] = DeiTModel.from_pretrained(_snake_case)
            self.assertIsNotNone(_snake_case)


def a__() -> str:
    '''Load the COCO cats test-fixture image used by the integration tests.'''
    UpperCAmelCase_: Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class _snake_case(unittest.TestCase):
    @cached_property
    def UpperCamelCase__(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def UpperCamelCase__(self):
        # End-to-end logits check against reference values.
        UpperCAmelCase_: Dict = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            _snake_case
        )
        UpperCAmelCase_: Union[str, Any] = self.default_image_processor
        UpperCAmelCase_: Tuple = prepare_img()
        UpperCAmelCase_: Union[str, Any] = image_processor(images=_snake_case, return_tensors="pt").to(_snake_case)
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_: Any = model(**_snake_case)
        # verify the logits
        UpperCAmelCase_: Optional[int] = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, _snake_case)
        UpperCAmelCase_: Tuple = torch.tensor([-1.0266, 0.1912, -1.2861]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], _snake_case, atol=1E-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def UpperCamelCase__(self):
        # fp16 inference smoke test with an accelerate device_map.
        UpperCAmelCase_: int = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.floataa, device_map="auto"
        )
        UpperCAmelCase_: Optional[Any] = self.default_image_processor
        UpperCAmelCase_: List[str] = prepare_img()
        UpperCAmelCase_: List[str] = image_processor(images=_snake_case, return_tensors="pt")
        UpperCAmelCase_: Union[str, Any] = inputs.pixel_values.to(_snake_case)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            UpperCAmelCase_: Optional[int] = model(_snake_case)
71
'''Image processor for ViViT-style video models.

Pipeline per frame: resize -> center-crop -> rescale (optionally with an
offset shifting values toward a symmetric range) -> normalize, applied to
batches of videos.

BUG FIXES vs. the mangled original: duplicate ``_snake_case`` parameter
names (a SyntaxError) and method names that did not match their call sites
(``self.resize`` etc.) are restored to coherent names taken from the
internal keyword call sites; the precedence bug in the ``do_resize``
validation is fixed with explicit parentheses.
'''
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

_lowerCamelCase = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce the input into a batch of videos (list of lists of frames).

    Accepts a batch of videos, a single video (list of frames), or a single
    frame, and always returns ``list[list[frame]]``.

    Raises:
        ValueError: if the input is none of the accepted shapes.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video -> wrap into a one-video batch.
        return [videos]
    elif is_valid_image(videos):
        # A single frame -> one-frame video inside a one-video batch.
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


# Backward-compatible alias: the module previously exposed this helper as `a__`.
a__ = make_batched


class _snake_case(BaseImageProcessor):
    """Video image processor: per-frame resize, center-crop, rescale and normalize."""

    # Keys produced in the BatchFeature output.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame; ``shortest_edge`` keeps aspect ratio, height/width is exact."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to ``size`` (must contain 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale``, optionally subtracting an offset first."""
        # Work in float so the subtraction below does not truncate.
        # NOTE(review): the original subtracts `scale / 2` *before* rescaling,
        # which barely shifts the result — confirm against the intended
        # [-1, 1] mapping before relying on `offset=True`.
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize a frame: ``(image - mean) / std`` per channel."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        offset: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the full transform pipeline to a single frame."""
        # BUG FIX: the original read `do_resize and size is None or resample is None`,
        # which (by precedence) raised whenever resample was None even with
        # do_resize=False. Parenthesize the intended grouping.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        offset: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more videos; per-call arguments override the defaults
        configured in ``__init__``. Returns a BatchFeature with "pixel_values"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
71
1
'''Flax/JAX ControlNet: output container, conditioning embedding, and model.

NOTE(review): identifiers in this file are machine-mangled and internally
inconsistent — classes are all named `_snake_case`, every local is assigned
to `UpperCAmelCase_` but read back under its original name, several
signatures repeat the parameter name `_snake_case` (a SyntaxError), and
`FlaxControlNetConditioningEmbedding` / `FlaxControlNetOutput` are referenced
but never defined under those names. The comments below describe the intended
structure; the code itself is left byte-for-byte as found.
'''
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
)


@flax.struct.dataclass
class _snake_case(__SCREAMING_SNAKE_CASE):
    # Output container for the ControlNet forward pass: per-resolution
    # down-block residuals plus the mid-block residual.
    # NOTE(review): both fields are mangled to `__A`; the second silently
    # overwrites the first in the class namespace.
    __A: jnp.ndarray
    __A: jnp.ndarray


class _snake_case(nn.Module):
    # Embeds the conditioning image (e.g. canny edges, pose) into the UNet's
    # feature space: conv_in -> alternating stride-1/stride-2 convs -> zero conv_out.
    __A: int
    __A: Tuple[int] = (16, 32, 96, 2_56)
    __A: jnp.dtype = jnp.floataa

    def UpperCamelCase__(self):
        # Flax `setup`: build conv_in, the downsampling conv stack, and a
        # zero-initialized conv_out (zero init keeps the ControlNet a no-op at
        # the start of training).
        UpperCAmelCase_: Tuple = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        UpperCAmelCase_: Any = []
        for i in range(len(self.block_out_channels) - 1):
            UpperCAmelCase_: str = self.block_out_channels[i]
            UpperCAmelCase_: Tuple = self.block_out_channels[i + 1]
            # Stride-1 conv keeps resolution ...
            UpperCAmelCase_: Any = nn.Conv(
                _snake_case,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(_snake_case)
            # ... then a stride-2 conv halves it.
            UpperCAmelCase_: Optional[int] = nn.Conv(
                _snake_case,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(_snake_case)
        UpperCAmelCase_: Tuple = blocks
        UpperCAmelCase_: str = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, _snake_case):
        # conv_in -> SiLU -> (conv -> SiLU)* -> zero conv_out
        UpperCAmelCase_: Dict = self.conv_in(_snake_case)
        UpperCAmelCase_: int = nn.silu(_snake_case)
        for block in self.blocks:
            UpperCAmelCase_: Union[str, Any] = block(_snake_case)
            UpperCAmelCase_: Tuple = nn.silu(_snake_case)
        UpperCAmelCase_: Union[str, Any] = self.conv_out(_snake_case)
        return embedding


@flax_register_to_config
class _snake_case(nn.Module, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
    # Flax ControlNet model. Judging by their defaults, the mangled `__A`
    # fields correspond in order to: sample_size, in_channels,
    # down_block_types, only_cross_attention, block_out_channels,
    # layers_per_block, attention_head_dim, num_attention_heads,
    # cross_attention_dim, dropout, use_linear_projection, dtype,
    # flip_sin_to_cos, freq_shift, controlnet_conditioning_channel_order,
    # conditioning_embedding_out_channels — TODO confirm against upstream.
    __A: int = 32
    __A: int = 4
    __A: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    __A: Union[bool, Tuple[bool]] = False
    __A: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
    __A: int = 2
    __A: Union[int, Tuple[int]] = 8
    __A: Optional[Union[int, Tuple[int]]] = None
    __A: int = 12_80
    __A: float = 0.0
    __A: bool = False
    __A: jnp.dtype = jnp.floataa
    __A: bool = True
    __A: int = 0
    __A: str = "rgb"
    __A: Tuple[int] = (16, 32, 96, 2_56)

    def UpperCamelCase__(self, _snake_case):
        # init_weights: initialize parameters from dummy sample / timestep /
        # encoder-hidden-states / conditioning tensors.
        # init input tensors
        UpperCAmelCase_: List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
        UpperCAmelCase_: Any = jnp.zeros(_snake_case, dtype=jnp.floataa)
        UpperCAmelCase_: Tuple = jnp.ones((1,), dtype=jnp.intaa)
        UpperCAmelCase_: List[Any] = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
        # Conditioning image is 8x the latent resolution, 3 channels.
        UpperCAmelCase_: Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        UpperCAmelCase_: int = jnp.zeros(_snake_case, dtype=jnp.floataa)

        UpperCAmelCase_, UpperCAmelCase_: str = jax.random.split(_snake_case)
        UpperCAmelCase_: Optional[int] = {"params": params_rng, "dropout": dropout_rng}

        return self.init(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case)["params"]

    def UpperCamelCase__(self):
        # Flax `setup`: construct conv_in, time embedding, conditioning
        # embedding, the down blocks with their zero-initialized ControlNet
        # convs, and the mid block.
        UpperCAmelCase_: Tuple = self.block_out_channels
        UpperCAmelCase_: Union[str, Any] = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        UpperCAmelCase_: Dict = self.num_attention_heads or self.attention_head_dim

        # input
        UpperCAmelCase_: str = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        UpperCAmelCase_: Dict = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        UpperCAmelCase_: List[Any] = FlaxTimestepEmbedding(_snake_case, dtype=self.dtype)

        UpperCAmelCase_: int = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        UpperCAmelCase_: Any = self.only_cross_attention
        if isinstance(_snake_case, _snake_case):
            UpperCAmelCase_: Union[str, Any] = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(_snake_case, _snake_case):
            UpperCAmelCase_: Tuple = (num_attention_heads,) * len(self.down_block_types)

        # down
        UpperCAmelCase_: List[str] = []
        UpperCAmelCase_: str = []
        UpperCAmelCase_: List[str] = block_out_channels[0]
        UpperCAmelCase_: Union[str, Any] = nn.Conv(
            _snake_case,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(_snake_case)

        for i, down_block_type in enumerate(self.down_block_types):
            UpperCAmelCase_: Tuple = output_channel
            UpperCAmelCase_: List[Any] = block_out_channels[i]
            UpperCAmelCase_: int = i == len(_snake_case) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                UpperCAmelCase_: int = FlaxCrossAttnDownBlockaD(
                    in_channels=_snake_case,
                    out_channels=_snake_case,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                UpperCAmelCase_: List[Any] = FlaxDownBlockaD(
                    in_channels=_snake_case,
                    out_channels=_snake_case,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(_snake_case)

            # One zero conv per resnet layer of the down block ...
            for _ in range(self.layers_per_block):
                UpperCAmelCase_: List[Any] = nn.Conv(
                    _snake_case,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(_snake_case)

            # ... plus one for the downsampler, except after the final block.
            if not is_final_block:
                UpperCAmelCase_: int = nn.Conv(
                    _snake_case,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(_snake_case)

        UpperCAmelCase_: int = down_blocks
        UpperCAmelCase_: Union[str, Any] = controlnet_down_blocks

        # mid
        UpperCAmelCase_: int = block_out_channels[-1]
        UpperCAmelCase_: Tuple = FlaxUNetMidBlockaDCrossAttn(
            in_channels=_snake_case,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        UpperCAmelCase_: List[str] = nn.Conv(
            _snake_case,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        _snake_case,
        _snake_case,
        _snake_case,
        _snake_case,
        _snake_case=1.0,
        _snake_case=True,
        _snake_case=False,
    ):
        # Forward pass. Judging by the body, the mangled parameters are:
        # (sample, timesteps, encoder_hidden_states, controlnet_cond,
        # conditioning_scale, return_dict, train) — TODO confirm upstream.
        UpperCAmelCase_: int = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Conditioning image arrives BGR: flip the channel axis to RGB.
            UpperCAmelCase_: Union[str, Any] = jnp.flip(_snake_case, axis=1)

        # 1. time — promote scalar/python timesteps to a 1-D array.
        if not isinstance(_snake_case, jnp.ndarray):
            UpperCAmelCase_: Optional[int] = jnp.array([timesteps], dtype=jnp.intaa)
        elif isinstance(_snake_case, jnp.ndarray) and len(timesteps.shape) == 0:
            UpperCAmelCase_: str = timesteps.astype(dtype=jnp.floataa)
            UpperCAmelCase_: Optional[int] = jnp.expand_dims(_snake_case, 0)

        UpperCAmelCase_: str = self.time_proj(_snake_case)
        UpperCAmelCase_: Optional[Any] = self.time_embedding(_snake_case)

        # 2. pre-process — NCHW -> NHWC for Flax convolutions.
        UpperCAmelCase_: Union[str, Any] = jnp.transpose(_snake_case, (0, 2, 3, 1))
        UpperCAmelCase_: List[str] = self.conv_in(_snake_case)

        UpperCAmelCase_: Tuple = jnp.transpose(_snake_case, (0, 2, 3, 1))
        UpperCAmelCase_: Optional[int] = self.controlnet_cond_embedding(_snake_case)
        sample += controlnet_cond

        # 3. down — collect the residual from every down block.
        UpperCAmelCase_: Union[str, Any] = (sample,)
        for down_block in self.down_blocks:
            if isinstance(_snake_case, _snake_case):
                UpperCAmelCase_, UpperCAmelCase_: Union[str, Any] = down_block(
                    _snake_case, _snake_case, _snake_case, deterministic=not train
                )
            else:
                UpperCAmelCase_, UpperCAmelCase_: str = down_block(_snake_case, _snake_case, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        UpperCAmelCase_: str = self.mid_block(_snake_case, _snake_case, _snake_case, deterministic=not train)

        # 5. controlnet blocks — apply the zero convs to each residual.
        UpperCAmelCase_: int = ()
        for down_block_res_sample, controlnet_block in zip(_snake_case, self.controlnet_down_blocks):
            UpperCAmelCase_: List[str] = controlnet_block(_snake_case)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        UpperCAmelCase_: str = controlnet_down_block_res_samples

        UpperCAmelCase_: List[Any] = self.controlnet_mid_block(_snake_case)

        # 6. scaling — scale every residual by the conditioning strength.
        UpperCAmelCase_: Any = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=_snake_case, mid_block_res_sample=_snake_case
        )
71
'''Read datasets from, and write datasets to, JSON / JSON-lines files.

NOTE(review): identifiers in this file are machine-mangled — every local is
assigned to `UpperCAmelCase_` but read back under its original name, all
methods share the name `UpperCamelCase__` while call sites expect
`read`/`write`/`_batch_json`/`_write`, and signatures repeat the parameter
name `_snake_case` (a SyntaxError). Comments below describe the intended
behavior; the code is left byte-for-byte as found.
'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class _snake_case(__SCREAMING_SNAKE_CASE):
    # JSON dataset reader: wraps the `Json` builder and materializes either a
    # streaming IterableDataset or a regular map-style Dataset.
    def __init__(
        self,
        _snake_case,
        _snake_case=None,
        _snake_case=None,
        _snake_case=None,
        _snake_case=False,
        _snake_case=False,
        _snake_case=None,
        _snake_case=None,
        **_snake_case,
    ):
        # Intended parameters (by the super().__init__ keywords below):
        # path_or_paths, split, features, cache_dir, keep_in_memory,
        # streaming, field, num_proc, **kwargs.
        super().__init__(
            _snake_case,
            split=_snake_case,
            features=_snake_case,
            cache_dir=_snake_case,
            keep_in_memory=_snake_case,
            streaming=_snake_case,
            num_proc=_snake_case,
            **_snake_case,
        )
        UpperCAmelCase_: Tuple = field
        # Normalize a bare path into a {split: path} mapping.
        UpperCAmelCase_: List[Any] = path_or_paths if isinstance(_snake_case, _snake_case) else {self.split: path_or_paths}
        UpperCAmelCase_: Optional[int] = Json(
            cache_dir=_snake_case,
            data_files=_snake_case,
            features=_snake_case,
            field=_snake_case,
            **_snake_case,
        )

    def UpperCamelCase__(self):
        # `read`: return the dataset for the configured split.
        # Build iterable dataset
        if self.streaming:
            UpperCAmelCase_: List[str] = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            UpperCAmelCase_: Union[str, Any] = None
            UpperCAmelCase_: int = None
            UpperCAmelCase_: List[Any] = None
            UpperCAmelCase_: int = None
            self.builder.download_and_prepare(
                download_config=_snake_case,
                download_mode=_snake_case,
                verification_mode=_snake_case,
                base_path=_snake_case,
                num_proc=self.num_proc,
            )
            UpperCAmelCase_: Dict = self.builder.as_dataset(
                split=self.split, verification_mode=_snake_case, in_memory=self.keep_in_memory
            )
        return dataset


class _snake_case:
    # JSON dataset writer: serializes an Arrow-backed Dataset to a JSON file
    # or file-like object, in batches, optionally with multiprocessing.
    def __init__(
        self,
        _snake_case,
        _snake_case,
        _snake_case=None,
        _snake_case=None,
        **_snake_case,
    ):
        # Intended parameters: dataset, path_or_buf, batch_size, num_proc,
        # **to_json_kwargs (forwarded to pandas `DataFrame.to_json`).
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
        UpperCAmelCase_: int = dataset
        UpperCAmelCase_: Union[str, Any] = path_or_buf
        UpperCAmelCase_: str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        UpperCAmelCase_: Dict = num_proc
        # Output encoding for the serialized JSON bytes.
        UpperCAmelCase_: Optional[Any] = "utf-8"
        UpperCAmelCase_: Optional[int] = to_json_kwargs

    def UpperCamelCase__(self):
        # `write`: resolve pandas to_json options, open the destination
        # (path or buffer), and delegate to `_write`. Returns bytes written.
        UpperCAmelCase_: Dict = self.to_json_kwargs.pop("path_or_buf", _snake_case)
        UpperCAmelCase_: Tuple = self.to_json_kwargs.pop("orient", "records")
        # JSON-lines by default for the "records" orient.
        UpperCAmelCase_: Any = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        UpperCAmelCase_: Optional[int] = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        UpperCAmelCase_: int = self.to_json_kwargs.pop("compression", _snake_case)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''')

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=_snake_case) as buffer:
                UpperCAmelCase_: List[str] = self._write(
                    file_obj=_snake_case, orient=_snake_case, lines=_snake_case, index=_snake_case, **self.to_json_kwargs
                )
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    " was passed. Please provide a local path instead."
                )
            UpperCAmelCase_: Union[str, Any] = self._write(
                file_obj=self.path_or_buf, orient=_snake_case, lines=_snake_case, index=_snake_case, **self.to_json_kwargs
            )
        return written

    def UpperCamelCase__(self, _snake_case):
        # `_batch_json`: serialize one batch (args tuple unpacks to
        # offset, orient, lines, index, to_json_kwargs) to encoded bytes.
        # Takes a single args tuple so it can be fed to `Pool.imap`.
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_: Optional[int] = args

        UpperCAmelCase_: List[str] = query_table(
            table=self.dataset.data,
            key=slice(_snake_case, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        UpperCAmelCase_: Optional[Any] = batch.to_pandas().to_json(
            path_or_buf=_snake_case, orient=_snake_case, lines=_snake_case, index=_snake_case, **_snake_case
        )
        # Keep batches newline-separated so concatenation stays valid JSON-lines.
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def UpperCamelCase__(
        self,
        _snake_case,
        _snake_case,
        _snake_case,
        _snake_case,
        **_snake_case,
    ):
        # `_write`: stream all batches to `file_obj`, sequentially or via a
        # multiprocessing pool; returns the number of bytes written.
        UpperCAmelCase_: Optional[Any] = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                UpperCAmelCase_: Any = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(_snake_case)
        else:
            UpperCAmelCase_, UpperCAmelCase_: int = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                # `imap` preserves batch order, so the output file stays ordered.
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, _snake_case, _snake_case)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(_snake_case)

        return written
71
1
'''Read datasets from, and write datasets to, SQL databases.

NOTE(review): identifiers in this file are machine-mangled — locals are
assigned to `UpperCAmelCase_` but read back under their original names, all
methods share the name `UpperCamelCase__` while call sites expect
`read`/`write`/`_batch_sql`/`_write`, and signatures repeat the parameter
name `_snake_case` (a SyntaxError). Comments below describe the intended
behavior; the code is left byte-for-byte as found.
'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlitea

    import sqlalchemy


class _snake_case(__SCREAMING_SNAKE_CASE):
    # SQL dataset reader: wraps the `Sql` builder around a query/table and a
    # connection, then materializes the "train" split as a Dataset.
    def __init__(
        self,
        _snake_case,
        _snake_case,
        _snake_case=None,
        _snake_case=None,
        _snake_case=False,
        **_snake_case,
    ):
        # Intended parameters (by the keywords below): sql, con, features,
        # cache_dir, keep_in_memory, **kwargs.
        super().__init__(features=_snake_case, cache_dir=_snake_case, keep_in_memory=_snake_case, **_snake_case)
        UpperCAmelCase_: str = Sql(
            cache_dir=_snake_case,
            features=_snake_case,
            sql=_snake_case,
            con=_snake_case,
            **_snake_case,
        )

    def UpperCamelCase__(self):
        # `read`: prepare the builder and return the materialized dataset.
        UpperCAmelCase_: Union[str, Any] = None
        UpperCAmelCase_: Tuple = None
        UpperCAmelCase_: Union[str, Any] = None
        UpperCAmelCase_: Tuple = None
        self.builder.download_and_prepare(
            download_config=_snake_case,
            download_mode=_snake_case,
            verification_mode=_snake_case,
            base_path=_snake_case,
        )

        # Build dataset for splits
        UpperCAmelCase_: Union[str, Any] = self.builder.as_dataset(
            split="train", verification_mode=_snake_case, in_memory=self.keep_in_memory
        )
        return dataset


class _snake_case:
    # SQL dataset writer: appends an Arrow-backed Dataset to a database table
    # batch by batch via pandas `DataFrame.to_sql`.
    def __init__(
        self,
        _snake_case,
        _snake_case,
        _snake_case,
        _snake_case=None,
        _snake_case=None,
        **_snake_case,
    ):
        # Intended parameters: dataset, name (table name), con, batch_size,
        # num_proc, **to_sql_kwargs (forwarded to `DataFrame.to_sql`).
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')

        UpperCAmelCase_: Any = dataset
        UpperCAmelCase_: Union[str, Any] = name
        UpperCAmelCase_: List[Any] = con
        UpperCAmelCase_: Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        UpperCAmelCase_: Dict = num_proc
        UpperCAmelCase_: Tuple = to_sql_kwargs

    def UpperCamelCase__(self):
        # `write`: strip reader-only kwargs and delegate to `_write`;
        # returns the number of rows written.
        UpperCAmelCase_: Optional[int] = self.to_sql_kwargs.pop("sql", _snake_case)
        UpperCAmelCase_: Tuple = self.to_sql_kwargs.pop("con", _snake_case)
        UpperCAmelCase_: Union[str, Any] = self.to_sql_kwargs.pop("index", _snake_case)

        UpperCAmelCase_: Tuple = self._write(index=_snake_case, **self.to_sql_kwargs)
        return written

    def UpperCamelCase__(self, _snake_case):
        # `_batch_sql`: write one batch (args unpacks to offset, index,
        # to_sql_kwargs). Single-tuple signature so it can feed `Pool.imap`.
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_: Optional[Any] = args

        # First batch may create/replace the table; later ones must append.
        UpperCAmelCase_: Optional[int] = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs

        UpperCAmelCase_: Optional[int] = query_table(
            table=self.dataset.data,
            key=slice(_snake_case, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        UpperCAmelCase_: List[Any] = batch.to_pandas()
        UpperCAmelCase_: str = df.to_sql(self.name, self.con, index=_snake_case, **_snake_case)
        # `to_sql` may return None; fall back to the batch length.
        return num_rows or len(_snake_case)

    def UpperCamelCase__(self, _snake_case, **_snake_case):
        # `_write`: iterate all batches sequentially or through a
        # multiprocessing pool, accumulating the row count.
        UpperCAmelCase_: Dict = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            UpperCAmelCase_, UpperCAmelCase_: Union[str, Any] = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, _snake_case, _snake_case)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
71
'''Dummy objects standing in for the real speech classes when the optional
`speech` backend is not installed.

Instantiating any of these raises an informative ImportError (via
`requires_backends`) instead of failing at module import time.

BUG FIX: in the original, `*` and `**` parameters of each `__init__` shared
the single name `_snake_case`, which is a SyntaxError; they now carry the
conventional distinct names `args` / `kwargs` (behavior is unchanged — both
are ignored).
'''
from ..utils import DummyObject, requires_backends


class _snake_case(metaclass=__SCREAMING_SNAKE_CASE):
    # Backend(s) that must be installed before the real class can be used.
    __A: Any = ["speech"]

    def __init__(self, *args, **kwargs):
        # Raises unless the `speech` backend is available.
        requires_backends(self, ["speech"])


class _snake_case(metaclass=__SCREAMING_SNAKE_CASE):
    # Backend(s) that must be installed before the real class can be used.
    __A: Dict = ["speech"]

    def __init__(self, *args, **kwargs):
        # Raises unless the `speech` backend is available.
        requires_backends(self, ["speech"])
71
1
'''Lazy import structure for the Jukebox model: configuration, tokenizer, and
(when torch is available) the modeling objects.

BUG FIX: in the original, the import-structure dict and the torch-only list
of modeling names were both assigned to the same variable (the list clobbered
the dict), and the final `_LazyModule(...)` call referenced an undefined name
`_import_structure`, so importing this module raised NameError. The standard
transformers lazy-module layout is restored below.
'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names; consumed by `_LazyModule` so heavy imports only
# happen on first attribute access.
_import_structure = {
    """configuration_jukebox""": [
        """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """JukeboxConfig""",
        """JukeboxPriorConfig""",
        """JukeboxVQVAEConfig""",
    ],
    """tokenization_jukebox""": ["""JukeboxTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not expose the modeling objects.
    pass
else:
    # Register the torch modeling names under their own submodule key instead
    # of overwriting the whole import structure.
    _import_structure["""modeling_jukebox"""] = [
        """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """JukeboxModel""",
        """JukeboxPreTrainedModel""",
        """JukeboxVQVAE""",
        """JukeboxPrior""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy; keep the old mangled alias
    # `_lowerCamelCase` pointing at it for backward compatibility.
    sys.modules[__name__] = _lowerCamelCase = _LazyModule(
        __name__, globals()["""__file__"""], _import_structure, module_spec=__spec__
    )
71
'''Solve a system of two linear equations in two variables with Cramer's rule.'''


def a__(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve ``a1*x + b1*y = c1`` and ``a2*x + b2*y = c2`` by Cramer's rule.

    Each equation is given as a 3-element list ``[a, b, c]``.

    BUG FIX: the original collapsed both equations and all coefficients into
    single mangled names, so every determinant evaluated to
    ``aa*ba - aa*ba == 0`` and both parameters even shared one name (a
    SyntaxError); distinct names restore the intended arithmetic.

    Returns:
        ``(x, y)`` as floats; ``(0.0, 0.0)`` when the unique solution is the
        trivial one.

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients, if
            both ``a`` and ``b`` are zero in both equations, or if the system
            has no solution / infinitely many solutions.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution: both lines pass through the origin.
        return (0.0, 0.0)
    # Non-trivial unique solution (consistent system).
    return (determinant_x / determinant, determinant_y / determinant)
71
1
'''Integration tests for the `datasets` inspection helpers
(`inspect_dataset`, `inspect_metric`, `get_dataset_*`).

NOTE(review): identifiers are machine-mangled — all test functions are named
`a__`, every local is assigned to `UpperCAmelCase_` but read back under its
original name, and each signature repeats the parameter name
`_SCREAMING_SNAKE_CASE` (a SyntaxError). Judging by the bodies, the second
parameter of the first two tests is pytest's `tmp_path` fixture. The code is
left byte-for-byte as found.
'''
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Marker for tests that need network access to the Hugging Face Hub.
_lowerCamelCase = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def a__(_SCREAMING_SNAKE_CASE: Tuple, _SCREAMING_SNAKE_CASE: str) -> Tuple:
    """inspect_dataset copies the loading script into the target directory."""
    inspect_dataset(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
    UpperCAmelCase_: Optional[Any] = path + ".py"
    assert script_name in os.listdir(_SCREAMING_SNAKE_CASE)
    # No bytecode cache should be copied along.
    assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def a__(_SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
    """inspect_metric (deprecated) copies the metric script into the target directory."""
    inspect_metric(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
    UpperCAmelCase_: List[Any] = path + ".py"
    assert script_name in os.listdir(_SCREAMING_SNAKE_CASE)
    assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def a__(_SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: Optional[Any], _SCREAMING_SNAKE_CASE: Dict) -> str:
    """get_dataset_config_info returns the requested config with its splits."""
    UpperCAmelCase_: int = get_dataset_config_info(_SCREAMING_SNAKE_CASE, config_name=_SCREAMING_SNAKE_CASE)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def a__(_SCREAMING_SNAKE_CASE: Dict, _SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: List[str]) -> str:
    """get_dataset_config_info raises when a multi-config dataset gets no config name."""
    with pytest.raises(_SCREAMING_SNAKE_CASE):
        get_dataset_config_info(_SCREAMING_SNAKE_CASE, config_name=_SCREAMING_SNAKE_CASE)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def a__(_SCREAMING_SNAKE_CASE: Tuple, _SCREAMING_SNAKE_CASE: int) -> List[str]:
    """get_dataset_config_names includes the expected config for each dataset."""
    UpperCAmelCase_: Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def a__(_SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: Any, _SCREAMING_SNAKE_CASE: List[str]) -> Any:
    """get_dataset_infos lists every config and its splits."""
    UpperCAmelCase_: Any = get_dataset_infos(_SCREAMING_SNAKE_CASE)
    assert list(infos.keys()) == expected_configs
    UpperCAmelCase_: Optional[Any] = expected_configs[0]
    assert expected_config in infos
    UpperCAmelCase_: Dict = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def a__(_SCREAMING_SNAKE_CASE: Union[str, Any], _SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: Dict) -> Any:
    """get_dataset_infos exposes the expected splits for a given config."""
    UpperCAmelCase_: Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE)
    assert expected_config in infos
    UpperCAmelCase_: Dict = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def a__(_SCREAMING_SNAKE_CASE: int, _SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: str) -> Any:
    """get_dataset_split_names raises when a multi-config dataset gets no config name."""
    with pytest.raises(_SCREAMING_SNAKE_CASE):
        get_dataset_split_names(_SCREAMING_SNAKE_CASE, config_name=_SCREAMING_SNAKE_CASE)
71
"""Feature-scaling helpers: min-max normalization and z-score standardization."""
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale *data* linearly onto [0, 1] (min-max normalization).

    Args:
        data: non-empty sequence of numbers; must contain at least two
            distinct values, otherwise the scale (max - min) is zero and a
            ZeroDivisionError is raised.
        ndigits: decimal places each scaled value is rounded to.

    Returns:
        List of scaled values, in the original order.
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Standardize *data* to zero mean and unit sample standard deviation.

    Args:
        data: sequence of at least two numbers (``stdev`` requires two points);
            must not be constant, otherwise sigma is zero and a division error
            is raised.
        ndigits: decimal places each z-score is rounded to.

    Returns:
        List of z-scores, in the original order.
    """
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
71
1
"""Tests for the CANINE tokenizer (character level: token ids are Unicode code points)."""
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    # CANINE has no fast (Rust) tokenizer.
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        """Pretrained tokenizer used by the integration tests below."""
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # NOTE(review): the assignment target of `1024` was lost in the source;
        # upstream shrinks the unicode vocab so the common tests stay fast — confirm.
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # Expected ids are the code points of the first sentence, wrapped in
        # CLS (0xE000 = 57344) / SEP (0xE001 = 57345) and padded with 0.
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(
                    getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]
                )

    # The common tests below do not apply to CANINE, which has no vocabulary file
    # and cannot tokenize from pre-tokenized input, so they are skipped as no-ops.
    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        pass
71
"""Accelerate example: train BERT on GLUE MRPC with automatic OOM recovery.

Builds on `nlp_example.py`; the new feature demonstrated here is
`find_executable_batch_size`, which retries the inner training loop with a
smaller batch size whenever an out-of-memory error occurs. Look for the
"# New Code #" tags for the additions. Runs on CPU, single/multi GPU, or TPU,
with optional mixed precision.
"""
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: used to coordinate the main-process-first dataset map and
            to decide padding strategy (TPU / mixed precision).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the full fine-tuning loop described by `config` and CLI `args`."""
    # For testing only: shorten the run when mocked dataloaders are in use.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation otherwise training will not work on TPU (`accelerate` will kindly throw an error
        # to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave
        # them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI arguments and launch training with the default hyper-parameters."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
71
1
"""Convert an energy value between units via an intermediate value in joules."""

# Conversion factors TO joules: 1 <unit> == ENERGY_CONVERSION[<unit>] J.
ENERGY_CONVERSION: dict = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 100_0000,
    "gigajoule": 10_0000_0000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 360_0000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 418_6800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert *value* from one energy unit to another.

    Args:
        from_type: source unit; must be a key of ENERGY_CONVERSION.
        to_type: target unit; must be a key of ENERGY_CONVERSION.
        value: magnitude expressed in *from_type* units.

    Returns:
        The magnitude expressed in *to_type* units.

    Raises:
        ValueError: if either unit name is unknown (the message lists the
            valid unit names).
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # from_type -> joules -> to_type
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
"""Prime factorization by trial division."""
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of *n* in non-decreasing order.

    Uses trial division up to sqrt(n); whatever remains greater than 1 after
    the loop is itself prime. For n < 2 the result is the empty list.

    Args:
        n: integer to factor.

    Returns:
        List of prime factors with multiplicity, e.g. 12 -> [2, 2, 3].
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # The leftover cofactor is prime.
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
"""Image processor with crop-percentage resizing (PoolFormer-style pipeline)."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    """Resize (with optional crop_pct over-scaling), center-crop, rescale and
    normalize images, returning a `BatchFeature` with `pixel_values`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`. If `crop_pct` is given, the target is over-scaled by
        1/crop_pct first so a later center crop of `size` keeps the central
        `crop_pct` fraction of the image.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size` (must contain 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Run the configured pipeline on one image or a list of images.

        Every step flag/parameter falls back to the value set in `__init__`
        when not passed explicitly. Returns a `BatchFeature` whose
        `pixel_values` is formatted as `data_format` and, optionally, as the
        tensor type `return_tensors`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized deliberately: without them, `and` binds tighter than `or`
        # and `resample is None` alone would raise even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
71
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCamelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ _lowerCamelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" _lowerCamelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _snake_case (datasets.Metric): def UpperCamelCase__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ), } ) ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case ) }
71
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _snake_case : __A : Dict =BlenderbotConfig __A : Union[str, Any] ={} __A : Any ="gelu" def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,): UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Dict = seq_length UpperCAmelCase_ : int = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : str = eos_token_id UpperCAmelCase_ : List[Any] = pad_token_id UpperCAmelCase_ : List[Any] = bos_token_id def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase_ : 
int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case ) return config, inputs_dict def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder() UpperCAmelCase_ : int = inputs_dict["input_ids"] UpperCAmelCase_ : Dict = input_ids[:1, :] UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :] UpperCAmelCase_ : int = inputs_dict["head_mask"] UpperCAmelCase_ : Optional[int] = 1 # first forward pass UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 ) UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0] UpperCAmelCase_ : List[Any] = model(_snake_case 
,attention_mask=_snake_case ,past_key_values=_snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 ) def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]: """simple docstring""" if attention_mask is None: UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase_ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] 
=(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __A : Dict =( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __A : Any =True __A : Dict =False __A : Dict =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_snake_case ) @require_tokenizers @require_tf class _snake_case (unittest.TestCase): __A : Optional[int] =["My friends are cool but they eat too many carbs."] __A : Optional[Any] ="facebook/blenderbot-400M-distill" @cached_property def UpperCamelCase__ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" ) UpperCAmelCase_ : Union[str, Any] = self.model.generate( model_inputs.input_ids ,) UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
71
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCamelCase = logging.getLogger(__name__) @dataclass class _snake_case : __A : str =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."}) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."}) @dataclass class _snake_case : __A : str =field( metadata={"help": "The input data dir. 
Should contain the .tsv files (or other data files) for the task."}) __A : Optional[str] =field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __A : Optional[int] =field( default=10_24 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_28 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."}) __A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."}) __A : bool =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) ) def a__ ( ) -> Any: """simple docstring""" UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()

    # NOTE(review): the tuple targets above were mangled; the rest of this
    # function reads ``model_args`` / ``data_args`` / ``training_args``, so the
    # original targets were presumably those three names — confirm before use.
    check_output_dir(_SCREAMING_SNAKE_CASE )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )

    # Presumably copies these dropout/layerdrop settings onto the model config
    # when the config defines them — NOTE(review): the mangled getattr/setattr
    # arguments make the source and target objects unverifiable here.
    UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )

    # use task specific params
    use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        UpperCAmelCase_ : Dict = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    # Optionally freeze the embeddings and/or the whole encoder.
    if model_args.freeze_embeds:
        freeze_embeds(_SCREAMING_SNAKE_CASE )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    UpperCAmelCase_ : Dict = SeqaSeqDataset

    # Get datasets
    UpperCAmelCase_ : Tuple = (
        dataset_class(
            _SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase_ : Dict = (
        dataset_class(
            _SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    UpperCAmelCase_ : int = (
        dataset_class(
            _SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    UpperCAmelCase_ : Optional[Any] = (
        build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
    )
    UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
        model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
    UpperCAmelCase_ : List[Any] = {}

    # Training
    if training_args.do_train:
        logger.info("*** Train ***" )
        UpperCAmelCase_ : Any = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        UpperCAmelCase_ : int = train_result.metrics
        UpperCAmelCase_ : Dict = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
            all_metrics.update(_SCREAMING_SNAKE_CASE )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
        UpperCAmelCase_ : Optional[Any] = data_args.n_val
        UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
            all_metrics.update(_SCREAMING_SNAKE_CASE )

    # Test-set prediction and (optionally) generation dump
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
        UpperCAmelCase_ : List[str] = test_output.metrics
        UpperCAmelCase_ : int = data_args.n_test

        if trainer.is_world_process_zero():
            UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
            all_metrics.update(_SCREAMING_SNAKE_CASE )

            if training_args.predict_with_generate:
                UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
                write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )

    if trainer.is_world_process_zero():
        save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )

    return all_metrics


def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
    """Ignore the argument and run ``main()``.

    NOTE(review): the unused parameter suggests this is the ``_mp_fn(index)``
    hook used by TPU/XLA spawn launchers — confirm against the launcher that
    imports this module.
    """
    main()


if __name__ == "__main__":
    main()
71
1
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def a__ ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable ): return x return (x, x) @require_tf class _snake_case : def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): pass def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,**_snake_case ): UpperCAmelCase_ : int = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = TFVisionTextDualEncoderModel(_snake_case ) UpperCAmelCase_ : Optional[Any] = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ) self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], config.projection_dim) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,**_snake_case 
): UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.get_vision_text_model(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case ) UpperCAmelCase_ : Any = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ) self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], model.config.projection_dim) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,**_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.get_vision_text_model(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = {"vision_model": vision_model, "text_model": text_model} UpperCAmelCase_ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case ) UpperCAmelCase_ : List[str] = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ) self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], model.config.projection_dim) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,**_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_vision_text_model(_snake_case ,_snake_case ) UpperCAmelCase_ : int = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case ) UpperCAmelCase_ : Optional[Any] = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ) UpperCAmelCase_ : str = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case ) UpperCAmelCase_ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_snake_case ) UpperCAmelCase_ : Any = model(input_ids=_snake_case ,pixel_values=_snake_case 
,attention_mask=_snake_case ) UpperCAmelCase_ : Optional[int] = after_output[0].numpy() UpperCAmelCase_ : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_snake_case ,1E-5 ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,**_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ : int = self.get_vision_text_model(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case ) UpperCAmelCase_ : List[str] = model( input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ,output_attentions=_snake_case ) UpperCAmelCase_ : Union[str, Any] = output.vision_model_output.attentions self.assertEqual(len(_snake_case ) ,vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : List[Any] = to_atuple(vision_model.config.image_size ) UpperCAmelCase_ : str = to_atuple(vision_model.config.patch_size ) UpperCAmelCase_ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase_ : Any = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase_ : Optional[Any] = output.text_model_output.attentions self.assertEqual(len(_snake_case ) ,text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(_snake_case ,_snake_case ,f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = 
self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_save_load(**_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_snake_case ) @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.get_pretrained_model_and_inputs() UpperCAmelCase_ : Optional[int] = model_a(**_snake_case ) UpperCAmelCase_ : List[Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_snake_case ) UpperCAmelCase_ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(_snake_case ) UpperCAmelCase_ : Tuple = model_a(**_snake_case ) UpperCAmelCase_ : str = after_outputs[0].numpy() UpperCAmelCase_ : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_snake_case ,1E-5 ) @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" ,"hf-internal-testing/tiny-random-bert" ) UpperCAmelCase_ : List[Any] = 13 UpperCAmelCase_ : int = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase_ : List[Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size ) UpperCAmelCase_ : List[str] = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ 
: List[str] = TFViTModel(_snake_case ,name="vision_model" ) UpperCAmelCase_ : List[Any] = TFBertModel(_snake_case ,name="text_model" ) return vision_model, text_model def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = TFViTModelTester(self ) UpperCAmelCase_ : Union[str, Any] = TFBertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = vision_config_and_inputs ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): def UpperCamelCase__ ( self ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" ,"hf-internal-testing/tiny-random-roberta" ) UpperCAmelCase_ : List[str] = 13 UpperCAmelCase_ : int = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase_ : List[Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size ) UpperCAmelCase_ : int = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : int = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,**_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.get_vision_text_model(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case ) UpperCAmelCase_ : Union[str, Any] = model( input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ,output_attentions=_snake_case ) UpperCAmelCase_ : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(_snake_case ) ,vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase_ : List[str] = to_atuple(vision_model.config.image_size ) UpperCAmelCase_ : int = to_atuple(vision_model.config.patch_size ) UpperCAmelCase_ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase_ : Optional[Any] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase_ : Optional[int] = output.text_model_output.attentions self.assertEqual(len(_snake_case ) ,text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] 
,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : Optional[int] = TFDeiTModel(_snake_case ,name="vision_model" ) UpperCAmelCase_ : List[Any] = TFRobertaModel(_snake_case ,name="text_model" ) return vision_model, text_model def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = TFDeiTModelTester(self ) UpperCAmelCase_ : Dict = TFRobertaModelTester(self ) UpperCAmelCase_ : str = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : int = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = vision_config_and_inputs ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" ,"hf-internal-testing/tiny-random-bert" ) UpperCAmelCase_ : Union[str, Any] = 13 UpperCAmelCase_ : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase_ : List[Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size ) UpperCAmelCase_ : Optional[int] = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def UpperCamelCase__ ( 
self ,_snake_case ,_snake_case ): UpperCAmelCase_ : List[str] = TFCLIPVisionModel(_snake_case ,name="vision_model" ) UpperCAmelCase_ : List[str] = TFBertModel(_snake_case ,name="text_model" ) return vision_model, text_model def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = TFCLIPVisionModelTester(self ) UpperCAmelCase_ : Union[str, Any] = TFBertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Tuple = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = vision_config_and_inputs ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class _snake_case (unittest.TestCase): @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" ,logit_scale_init_value=1.0 ,from_pt=_snake_case ) UpperCAmelCase_ : Tuple = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) UpperCAmelCase_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase_ : int = processor( text=["una foto di un gatto", "una foto di un cane"] ,images=_snake_case ,padding=_snake_case ,return_tensors="np" ) UpperCAmelCase_ : Optional[Any] = model(**_snake_case ) # verify the logits self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], 
inputs.pixel_values.shape[0]) ,) UpperCAmelCase_ : Optional[Any] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() ,_snake_case ,atol=1E-3 ) )
71
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _snake_case : __A : Dict =BlenderbotConfig __A : Union[str, Any] ={} __A : Any ="gelu" def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,): UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Dict = seq_length UpperCAmelCase_ : int = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : str = eos_token_id UpperCAmelCase_ : List[Any] = pad_token_id UpperCAmelCase_ : List[Any] = bos_token_id def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase_ : 
int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case ) return config, inputs_dict def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder() UpperCAmelCase_ : int = inputs_dict["input_ids"] UpperCAmelCase_ : Dict = input_ids[:1, :] UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :] UpperCAmelCase_ : int = inputs_dict["head_mask"] UpperCAmelCase_ : Optional[int] = 1 # first forward pass UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 ) UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0] UpperCAmelCase_ : List[Any] = model(_snake_case 
,attention_mask=_snake_case ,past_key_values=_snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 ) def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]: """simple docstring""" if attention_mask is None: UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase_ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] 
=(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __A : Dict =( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __A : Any =True __A : Dict =False __A : Dict =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_snake_case ) @require_tokenizers @require_tf class _snake_case (unittest.TestCase): __A : Optional[int] =["My friends are cool but they eat too many carbs."] __A : Optional[Any] ="facebook/blenderbot-400M-distill" @cached_property def UpperCamelCase__ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" ) UpperCAmelCase_ : Union[str, Any] = self.model.generate( model_inputs.input_ids ,) UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
71
1
"""Community pipeline that runs one prompt through Stable Diffusion v1.1-v1.4."""
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


# One checkpoint per Stable Diffusion v1.x release. These were four distinct
# constants; the obfuscated source had collapsed them all onto one name so only
# the last assignment survived.
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Pipeline for comparing the four Stable Diffusion v1.x checkpoints on the
    same prompt. Pipelines for v1.1-v1.3 are downloaded; v1.4 is assembled from
    the modules passed to ``__init__``.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        # BUG FIX: the original called ``super()._init_()`` (single
        # underscores), which raises AttributeError at runtime and never
        # initializes DiffusionPipeline.
        super().__init__()

        # BUG FIX: the pipelines were assigned to throwaway locals while
        # ``register_modules`` read ``self.pipe*``; store them on ``self``.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        # BUG FIX: the call repeated the keyword ``pipelinea`` four times,
        # which is a SyntaxError; keywords must be distinct.
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        """Return every registered (non-private) module keyed by config name."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in ``slice_size`` steps to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Go back to computing attention in one step."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the Stable Diffusion v1.1 checkpoint."""
        return self.pipe1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the Stable Diffusion v1.2 checkpoint."""
        return self.pipe2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the Stable Diffusion v1.3 checkpoint."""
        return self.pipe3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the Stable Diffusion v1.4 checkpoint."""
        return self.pipe4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through all four checkpoints and return one image each."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get all result images into a single list and pass it via
        # StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
71
'''simple docstring''' from numpy import exp, pi, sqrt def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> int: """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
71
1
"""Convert a fairseq UniSpeech checkpoint into the HF Transformers format."""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq state-dict key (or prefix) -> HF module path; "*" is replaced by the
# encoder layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Targets that live directly on the model rather than under `unispeech.`.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk ``key`` dot-by-dot on ``hf_pointer`` and copy ``value`` into place.

    ``weight_type`` selects which tensor of the resolved module is assigned
    (weight / weight_g / weight_v / bias, or the module data itself).
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every fairseq tensor into ``hf_model``; warn about leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The encoder layer index sits just before the matched key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/norm tensor, keyed by its layer/type ids."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the Transformers design,
    optionally writing a tokenizer vocabulary for fine-tuned CTC checkpoints.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
71
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class _snake_case(nn.Module):
    """
    Two parallel transformer blocks whose per-condition outputs are blended.

    Each of the two inner transformers encodes its own slice of
    ``encoder_hidden_states``; the residual outputs are mixed with
    ``mix_ratio`` and added back to the input hidden states.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        # Two identically-configured transformers, one per condition stream.
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on their condition slices and blend the results."""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual so the two streams can be mixed linearly.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)

    # Backwards-compatible alias for the obfuscated method name.
    UpperCamelCase__ = forward
71
1
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase_ : Any = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b" UpperCAmelCase_ : Optional[int] = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b" UpperCAmelCase_ : Union[str, Any] = max(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(_SCREAMING_SNAKE_CASE ) , b_binary.zfill(_SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
71
'''simple docstring''' import json import sys def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f: UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[Any] = results[benchmark_name] UpperCAmelCase_ : Any = benchmark_name.split("/" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) UpperCAmelCase_ : Any = "| metric |" UpperCAmelCase_ : Any = "|--------|" UpperCAmelCase_ : Union[str, Any] = "| new / old (diff) |" for metric_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = benchmark_res[metric_name] UpperCAmelCase_ : Union[str, Any] = metric_vals["new"] UpperCAmelCase_ : Optional[Any] = metric_vals.get("old" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = F''' {new_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": _lowerCamelCase = sys.argv[1] _lowerCamelCase = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
71
1
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = [], [] while len(_SCREAMING_SNAKE_CASE ) > 1: UpperCAmelCase_ , UpperCAmelCase_ : int = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE ) start.append(_SCREAMING_SNAKE_CASE ) end.append(_SCREAMING_SNAKE_CASE ) collection.remove(_SCREAMING_SNAKE_CASE ) collection.remove(_SCREAMING_SNAKE_CASE ) end.reverse() return start + collection + end if __name__ == "__main__": _lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase = [int(item) for item in user_input.split(""",""")] print(*merge_sort(unsorted), sep=""",""")
71
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[int] =DebertaVaTokenizer __A : Union[str, Any] =DebertaVaTokenizerFast __A : str =True __A : List[str] =True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = "this is a test" UpperCAmelCase_ : Optional[Any] = "this is a test" return input_text, output_text def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = "<pad>" UpperCAmelCase_ : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"[PAD]" ) self.assertEqual(len(_snake_case ) ,3_00_01 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = "This is a test" UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89] UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case 
,_snake_case ) # fmt: off UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,) @slow def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 
4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
71
1
'''simple docstring''' from __future__ import annotations def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Optional[int] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(_SCREAMING_SNAKE_CASE ) if n > 1: factors.append(_SCREAMING_SNAKE_CASE ) return factors if __name__ == "__main__": import doctest doctest.testmod()
71
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError("Input value must be an 'int' type" ) UpperCAmelCase_ : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
71
1
'''simple docstring'''
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        \"raw_values\" : Returns a full set of errors in case of multioutput input.

        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric(\"mse\")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1.        ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """`datasets.Metric` wrapper around `sklearn.metrics.mean_squared_error`."""

    def _info(self):
        # Standard Metric metadata; features depend on the selected config.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config accepts 2-D (multi-output) predictions/references.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        # sklearn's signature is (y_true, y_pred), hence references first.
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
71
'''simple docstring''' from math import factorial def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", f"""fifty-two card deck is: {combinations(52, 5)}\n""", ) print( """If a class of 40 students must be arranged into groups of""", f"""4 for group projects, there are {combinations(40, 4)} ways""", """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", f"""are {combinations(10, 3)} ways that first, second and""", """third place can be awarded.""", )
71
1
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys _lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
71
'''simple docstring'''
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNetaDConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class _snake_case(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model, CPU) tests for VideoToVideoSDPipeline."""

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    # NOTE(review): the obfuscated source only shows a bare False attribute here;
    # restored as the tester mixin's attention-slicing switch — confirm upstream.
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build minimal pipeline components small enough for CPU tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on *device*."""
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identity(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    """GPU integration test (renamed from a duplicate `_snake_case` that
    shadowed the fast-test class above)."""

    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
71
1
'''simple docstring''' import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(F'''Building PyTorch model from configuration: {config}''' ) UpperCAmelCase_ : List[str] = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
71
"""Integration tests for the `datasets` inspection API helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Mark every test in this module as an integration test (requires network/hub).
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """`inspect_dataset` copies the dataset script into the target directory."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """`inspect_metric` copies the metric script into the target directory."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info reports its own name and the expected split names, in order."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Omitting the config name for a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name is among the configs reported for the dataset."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    """`get_dataset_infos` returns one entry per config, keyed by config name."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_config, expected_splits):
    """A specific config's info carries its name and expected splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Omitting the config name for a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
71
1
"""Evaluate a causal-LM checkpoint: validation loss and perplexity, via 🤗 Accelerate."""
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that packs tokenized texts into fixed-length chunks.

    Raw texts are buffered until roughly ``seq_length * chars_per_token *
    num_of_sequences`` characters are collected, tokenized in one batch,
    concatenated with the BOS token as document separator, and yielded as
    ``seq_length``-sized `torch.Tensor`s.
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # BOS doubles as the separator between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Heuristic character budget per tokenization batch (~chars per token).
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Only full-length chunks are yielded; the trailing remainder is dropped.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Build the streaming evaluation dataloader from `args.dataset_name`.

    Relies on the module-level `tokenizer` being initialised first (script style).
    """
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Run evaluation over `eval_dataloader`; return (mean loss, perplexity).

    Uses the module-level `model`, `eval_dataloader` and `accelerator` globals.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat per-sample so `gather` aligns across processes.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
71
"""Unit tests for `CLIPProcessor` (tokenizer + image processor wrapper)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        """Write a tiny BPE vocab/merges pair and an image-processor config to disk."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """Round-trip through save_pretrained keeps tokenizer and image processor."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained override the saved components."""
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        """Processor's image path matches the bare image processor's output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor's text path matches the bare tokenizer's output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Joint call returns text + image keys; empty call raises ValueError."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """batch_decode is forwarded to the underlying tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """Output keys agree with the processor's declared model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
71
1
"""Tests for the `check_copies` utility (``# Copied from`` consistency checker)."""
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        """Point check_copies at a temp tree holding a copy of scheduling_ddpm.py."""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Restore the real source path before removing the temp tree.
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` and assert the checker's verdict.

        With `overwrite_result`, the checker is run in overwrite mode and the
        rewritten file must match the expected result.
        """
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # Format exactly like the repo's style checks so diffs are meaningful.
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
71
"""Fast (dummy-component) and slow (real-checkpoint) tests for AudioLDMPipeline."""
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build tiny randomly-initialised components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; MPS needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        """Passing precomputed prompt_embeds matches passing the raw prompt."""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        """Precomputed negative_prompt_embeds match passing the raw negative prompt."""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic inputs with fixed latents for the real checkpoint."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
71
1
'''simple docstring'''


def solution(n: int = 10) -> str:
    """Project Euler 97: return the last ``n`` digits of the non-Mersenne
    prime 28433 * 2**7830457 + 1.

    Three-argument ``pow`` keeps the exponentiation in modular arithmetic,
    so the huge intermediate power is never materialised.

    :param n: number of trailing digits to return (non-negative integer)
    :raises ValueError: if ``n`` is not an ``int`` or is negative

    >>> solution(10)
    '8739992577'
    >>> solution(1)
    '7'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    # Work modulo 10**n throughout; +1 cannot overflow in Python.
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"""{solution(10) = }""")
71
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Lazy import structure: submodule name -> public symbols it provides.
# Importing this package stays cheap; heavy submodules load on first attribute
# access via _LazyModule below.
_import_structure = {
    "configuration_gpt_neox_japanese": [
        "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GPTNeoXJapaneseConfig",
    ],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

# The modeling submodule is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox_japanese import (
        GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GPTNeoXJapaneseConfig,
    )
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module in sys.modules with a lazy proxy that resolves
    # attributes from _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
1
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Backtracking n-queens search: place one queen per row.

    ``possible_board[r]`` is the column of the queen in row ``r``.  Complete
    boards are appended to ``boards`` rendered as lists of row strings.

    :param possible_board: columns of queens placed so far (one per row)
    :param diagonal_right_collisions: occupied ``row - col`` diagonals (45 deg)
    :param diagonal_left_collisions: occupied ``row + col`` diagonals (135 deg)
    :param boards: output accumulator of solution boards
    :param n: board size
    """
    # The next row to fill equals the number of queens already placed.
    row = len(possible_board)

    # Base case: a queen in every row -> render e.g. [1, 3, 0, 2] as
    # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Try each column in the current row.
    for col in range(n):
        # Prune collisions:
        #   vertical      -> same column already used,
        #   45 deg        -> row - col constant along the diagonal,
        #   135 deg       -> row + col constant along the anti-diagonal.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Recurse with the new queen and its two occupied diagonals recorded.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every solution board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
71
'''simple docstring'''
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of minimum vertex cover.

    Repeatedly picks the vertex with the highest remaining degree, adds it to
    the cover, and deletes its incident edges until no edges remain.

    :param graph: adjacency lists, ``{vertex: [neighbours, ...]}``
    :return: the set of chosen vertices

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    # heapq is a min-heap, so ranks are stored negated (-degree) to pop the
    # highest-degree vertex first.
    queue: list[list] = []
    for key, value in graph.items():  # O(log n) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    chosen_vertices = set()

    # queue[0][0] is the negated rank of the highest-degree vertex left;
    # rank 0 means no edges remain anywhere.
    while queue and queue[0][0] != 0:
        # Extract the vertex with maximum remaining degree.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all edges adjacent to argmax and bump ranks accordingly.
        for elem in queue:
            if elem[0] == 0:
                # Vertex with no remaining neighbours; nothing to remove.
                continue
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Ranks were mutated in place, so restore the heap invariant.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
71
1
'''simple docstring'''


def merge_sort(collection: list) -> list:
    """Return the elements of ``collection`` in ascending order (stable).

    Classic top-down merge sort, O(n log n).  The merge uses two index
    pointers instead of ``list.pop(0)``, which would cost O(n) per element.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        # Linear-time merge of two sorted lists.
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            # '<=' keeps equal elements in original order (stability).
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        # One side is exhausted; the other is already sorted.
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(*merge_sort(unsorted), sep=""",""")
71
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalise ``videos`` into a batch: a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames) -> batch of one.
        return [videos]

    elif is_valid_image(videos):
        # A single frame -> one video containing one frame.
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class _snake_case(BaseImageProcessor):
    # NOTE(review): upstream equivalent appears to be a video image processor
    # (VivitImageProcessor-style) — confirm against the originating repo.
    """Video image processor: per frame, optionally resize, center-crop,
    rescale (with an optional offset that shifts values to be signed) and
    normalize, then stack into ``pixel_values``."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Defaults: resize shortest edge to 256, center-crop to 224x224.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` either by shortest edge or to an exact (height, width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale ``image`` by ``scale``; when ``offset`` is set, first subtract
        ``scale / 2`` so the rescaled values are shifted (signed) rather than
        purely positive."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean,
        std,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transformations to a single frame."""
        # NOTE(review): kept from the original — binds as
        # (do_resize and size is None) or (resample is None); confirm intent.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a video or batch of videos; every ``None`` argument falls
        back to the value configured in ``__init__``.  Returns a
        :class:`BatchFeature` with key ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
71
1
'''simple docstring'''
import argparse
import os

import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)

# UNet hyper-parameters for the small 32x32 class-conditional test checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet hyper-parameters for the ImageNet-64 (class-conditional) checkpoints.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet hyper-parameters for the LSUN 256x256 (bedroom/cat) checkpoints
# (unconditional: no class embeddings).
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
_lowerCamelCase = { """num_train_timesteps""": 40, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } _lowerCamelCase = { """num_train_timesteps""": 201, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } _lowerCamelCase = { """num_train_timesteps""": 151, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("boolean value expected" ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=False ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] UpperCAmelCase_ : List[Any] = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] UpperCAmelCase_ : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] UpperCAmelCase_ : Any = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] UpperCAmelCase_ : str = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] UpperCAmelCase_ : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] UpperCAmelCase_ : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] UpperCAmelCase_ : Dict = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] UpperCAmelCase_ : List[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight'''] UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: UpperCAmelCase_ : Any = checkpoint[F'''{old_prefix}.skip_connection.weight'''] UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , 
_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple=None ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.norm.weight'''] UpperCAmelCase_ : Optional[Any] = checkpoint[F'''{old_prefix}.norm.bias'''] UpperCAmelCase_ : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : int = bias_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : str = weight_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Any = bias_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Tuple = weight_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : int = bias_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Tuple = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) UpperCAmelCase_ : Optional[int] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" ) UpperCAmelCase_ : List[str] = {} UpperCAmelCase_ : str = checkpoint["time_embed.0.weight"] UpperCAmelCase_ : Any = checkpoint["time_embed.0.bias"] UpperCAmelCase_ : int = checkpoint["time_embed.2.weight"] UpperCAmelCase_ : Union[str, Any] = checkpoint["time_embed.2.bias"] if unet_config["num_class_embeds"] is not None: UpperCAmelCase_ : Optional[Any] = checkpoint["label_emb.weight"] UpperCAmelCase_ : Tuple = checkpoint["input_blocks.0.0.weight"] UpperCAmelCase_ : List[str] = checkpoint["input_blocks.0.0.bias"] UpperCAmelCase_ : Union[str, Any] = unet_config["down_block_types"] UpperCAmelCase_ : Tuple = unet_config["layers_per_block"] UpperCAmelCase_ : Any = unet_config["attention_head_dim"] 
UpperCAmelCase_ : Optional[Any] = unet_config["block_out_channels"] UpperCAmelCase_ : Optional[Any] = 1 UpperCAmelCase_ : Any = channels_list[0] for i, layer_type in enumerate(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = channels_list[i] UpperCAmelCase_ : List[Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = F'''down_blocks.{i}.resnets.{j}''' UpperCAmelCase_ : Union[str, Any] = F'''input_blocks.{current_layer}.0''' UpperCAmelCase_ : Optional[int] = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ : Optional[int] = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[Any] = F'''down_blocks.{i}.resnets.{j}''' UpperCAmelCase_ : List[str] = F'''input_blocks.{current_layer}.0''' UpperCAmelCase_ : Optional[Any] = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ : Dict = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = F'''down_blocks.{i}.attentions.{j}''' UpperCAmelCase_ : Tuple = F'''input_blocks.{current_layer}.1''' UpperCAmelCase_ : Union[str, Any] = convert_attention( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) current_layer += 1 if i != len(_SCREAMING_SNAKE_CASE ) - 1: UpperCAmelCase_ : str = F'''down_blocks.{i}.downsamplers.0''' UpperCAmelCase_ : int = F'''input_blocks.{current_layer}.0''' UpperCAmelCase_ : Any = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) current_layer += 1 UpperCAmelCase_ : List[str] = current_channels # hardcoded the mid-block 
for now UpperCAmelCase_ : Tuple = "mid_block.resnets.0" UpperCAmelCase_ : Dict = "middle_block.0" UpperCAmelCase_ : Union[str, Any] = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = "mid_block.attentions.0" UpperCAmelCase_ : int = "middle_block.1" UpperCAmelCase_ : Union[str, Any] = convert_attention(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = "mid_block.resnets.1" UpperCAmelCase_ : Dict = "middle_block.2" UpperCAmelCase_ : List[str] = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = 0 UpperCAmelCase_ : Tuple = unet_config["up_block_types"] for i, layer_type in enumerate(_SCREAMING_SNAKE_CASE ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ : Union[str, Any] = F'''up_blocks.{i}.resnets.{j}''' UpperCAmelCase_ : Dict = F'''output_blocks.{current_layer}.0''' UpperCAmelCase_ : Optional[Any] = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE ) current_layer += 1 if i != len(_SCREAMING_SNAKE_CASE ) - 1: UpperCAmelCase_ : int = F'''up_blocks.{i}.upsamplers.0''' UpperCAmelCase_ : int = F'''output_blocks.{current_layer-1}.1''' UpperCAmelCase_ : Dict = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ : Optional[Any] = F'''up_blocks.{i}.resnets.{j}''' UpperCAmelCase_ : List[Any] = F'''output_blocks.{current_layer}.0''' UpperCAmelCase_ : int = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_skip=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple 
= F'''up_blocks.{i}.attentions.{j}''' UpperCAmelCase_ : Dict = F'''output_blocks.{current_layer}.1''' UpperCAmelCase_ : int = convert_attention( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) current_layer += 1 if i != len(_SCREAMING_SNAKE_CASE ) - 1: UpperCAmelCase_ : List[str] = F'''up_blocks.{i}.upsamplers.0''' UpperCAmelCase_ : Optional[Any] = F'''output_blocks.{current_layer-1}.2''' UpperCAmelCase_ : str = convert_resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = checkpoint["out.0.weight"] UpperCAmelCase_ : Any = checkpoint["out.0.bias"] UpperCAmelCase_ : Optional[int] = checkpoint["out.2.weight"] UpperCAmelCase_ : Optional[Any] = checkpoint["out.2.bias"] return new_checkpoint if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") _lowerCamelCase = parser.parse_args() _lowerCamelCase = strabool(args.class_cond) _lowerCamelCase = os.path.basename(args.unet_path) print(f"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: _lowerCamelCase = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _lowerCamelCase = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: _lowerCamelCase = TEST_UNET_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: _lowerCamelCase = None _lowerCamelCase = con_pt_to_diffuser(args.unet_path, unet_config) _lowerCamelCase = UNetaDModel(**unet_config) 
image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: _lowerCamelCase = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: _lowerCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _lowerCamelCase = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") _lowerCamelCase = CMStochasticIterativeScheduler(**scheduler_config) _lowerCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
71
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Read JSON/JSON Lines files into a :class:`Dataset` (or an iterable
    dataset when ``streaming=True``) via the packaged ``Json`` builder."""

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # ``field`` selects a sub-field of the JSON document to load.
        self.field = field
        # Normalise to {split: paths} so the builder sees a uniform mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (iterable when streaming)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Write a :class:`Dataset` to JSON Lines (or another pandas ``to_json``
    orient), either to a path (any fsspec URL, optionally compressed) or to an
    open binary file object, batch by batch and optionally in parallel."""

    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Serialise the dataset and return the number of bytes written."""
        # ``path_or_buf`` must not leak into pandas' to_json kwargs.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON Lines by default for the "records" orient.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Serialise one batch of rows to JSON bytes.

        Takes a single tuple argument so it can be fed to ``Pool.imap``.
        """
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Write the dataset to ``file_obj`` batch by batch; returns bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            # Sequential path: serialise and write one batch at a time.
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            # Parallel path: serialise batches in worker processes, write in order.
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
71
1
"""Tests for the Spark packaged module of `datasets`.

Fixes applied: the helper referenced everywhere as
`_get_expected_row_ids_and_row_dicts_for_partition_order` is now actually
defined under that name, the shuffle mock's lambda binds the parameter it
uses (was `lambda _SCREAMING_SNAKE_CASE: x.reverse()` -> NameError), the
shard test binds the iterables/expected lists it asserts on, and each test
gets a distinct name so pytest collects all of them instead of only the
last redefinition.
"""
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect (row_id, row_dict) pairs from *df*, visiting partitions in *partition_order*.

    Row ids follow the "<partition>_<row index>" convention used by the Spark builder.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes.
    # A max_shard_size of 16 means each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # 100 rows / 2 rows per partition -> 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]  # Reverse the partitions.
    generate_fn = _generate_iterable_examples(df, partition_order)
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
71
'''simple docstring''' from ..utils import DummyObject, requires_backends class _snake_case (metaclass=__SCREAMING_SNAKE_CASE): __A : Any =["speech"] def __init__( self ,*_snake_case ,**_snake_case ): requires_backends(self ,["speech"] ) class _snake_case (metaclass=__SCREAMING_SNAKE_CASE): __A : Dict =["speech"] def __init__( self ,*_snake_case ,**_snake_case ): requires_backends(self ,["speech"] )
71
1
"""TensorFlow optimization utilities: warmup schedule, optimizer factory,
Adam with decoupled weight decay, and a gradient accumulator.

Fixes applied to the mangled source: duplicate keyword arguments
(`beta_a=..., beta_a=...`) and duplicate parameter names (several
`_snake_case`/`_SCREAMING_SNAKE_CASE` in one signature), which are
SyntaxErrors, are replaced with distinct names; the classes are named
`WarmUp` / `AdamWeightDecay` as the code itself references them; methods get
back the Keras hook names (`_prepare_local`, `_resource_apply_dense`, ...)
without which the subclass cannot work and every method redefinition
shadowed the previous one.
"""
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup to `initial_learning_rate`, then hand off to `decay_schedule_fn`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def a__(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Build an optimizer and its learning-rate schedule.

    Returns (optimizer, lr_schedule): polynomial decay, optionally preceded
    by linear warmup, driving either `AdamWeightDecay` (when
    `weight_decay_rate > 0`) or plain Adam.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with weight decay decoupled from the gradient update.

    Decay is applied directly to the variables (not folded into the
    gradients), with regex-based include/exclude lists deciding which
    variables decay.
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Shrink the variable in place before the Adam step; no-op for
        # variables excluded from decay.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients across multiple mini-batches.

    Call the instance with a list of gradients to add them to the running
    sums; `reset()` zeroes the sums and the step counter.
    """

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
71
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ) -> tuple[float, float]: """simple docstring""" if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3: raise ValueError("Please enter a valid equation." ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("Both a & b of two equations can't be zero." ) # Extract the coefficients UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = equationa UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = equationa # Calculate the determinants of the matrices UpperCAmelCase_ : Optional[int] = aa * ba - aa * ba UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba UpperCAmelCase_ : Any = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("Infinite solutions. (Consistent system)" ) else: raise ValueError("No solution. (Inconsistent system)" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: UpperCAmelCase_ : Optional[int] = determinant_x / determinant UpperCAmelCase_ : List[Any] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
71
1
'''simple docstring''' from math import factorial def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", f"""fifty-two card deck is: {combinations(52, 5)}\n""", ) print( """If a class of 40 students must be arranged into groups of""", f"""4 for group projects, there are {combinations(40, 4)} ways""", """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", f"""are {combinations(10, 3)} ways that first, second and""", """third place can be awarded.""", )
71
'''simple docstring''' from statistics import mean, stdev def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list: """simple docstring""" UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE ) # normalize data return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data] def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list: """simple docstring""" UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE ) # standardize data return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
71
1
"""Second-order (biquad) IIR filter builders following the Audio EQ Cookbook.

Fixes vs. the mangled source: every builder's parameters shared one name (a
SyntaxError) — they are now `frequency`, `samplerate`, `gain_db`,
`q_factor` — and the coefficient lists collapsed to `[aa, aa, aa]` are
restored to the distinct a0/a1/a2 and b0/b1/b2 terms computed above them.

NOTE(review): all seven builders are named `a__`, so each definition shadows
the previous one and only the final (high-shelf) builder is importable; the
names are preserved to keep the module's observable interface unchanged.
"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def a__(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def a__(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def a__(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def a__(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    # The feedback side reuses the feedforward terms in reverse order.
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def a__(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter with `gain_db` of boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def a__(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter with `gain_db` of boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def a__(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter with `gain_db` of boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
71
"""Accelerate example: GLUE MRPC fine-tuning with automatic recovery from
out-of-memory errors via `find_executable_batch_size`.

Fixes vs. the mangled source: duplicate parameter names (SyntaxErrors) are
replaced with real names; locals that were referenced but never bound
(`datasets`, `tokenizer`, the dataloaders) are bound again; the mocked
dataloaders rebinding and the `training_function` call now refer to the
functions actually defined here; the two module constants no longer share
one name.
"""
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the train/validation dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate the model, retrying with smaller batch sizes on OOM."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
71
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class _snake_case : def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=True ,_snake_case=True ,_snake_case=True ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=5_12 ,_snake_case=16 ,_snake_case=2 ,_snake_case=0.02 ,_snake_case=False ,_snake_case=True ,_snake_case="None" ,_snake_case=3 ,_snake_case=4 ,_snake_case=None ,): UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Dict = batch_size UpperCAmelCase_ : int = seq_length UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : Union[str, Any] = use_input_mask UpperCAmelCase_ : Optional[Any] = use_token_type_ids UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : Dict = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : int = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : Optional[Any] = num_labels UpperCAmelCase_ : int = num_choices 
UpperCAmelCase_ : str = relative_attention UpperCAmelCase_ : List[str] = position_biased_input UpperCAmelCase_ : List[str] = pos_att_type UpperCAmelCase_ : Optional[int] = scope def UpperCamelCase__ ( self ): UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Any = None UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : Optional[Any] = DebertaVaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,initializer_range=self.initializer_range ,return_dict=_snake_case ,) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : List[str] = TFDebertaVaModel(config=_snake_case ) UpperCAmelCase_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCAmelCase_ : 
Optional[Any] = [input_ids, input_mask] UpperCAmelCase_ : Any = model(_snake_case ) UpperCAmelCase_ : List[Any] = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Optional[int] = TFDebertaVaForMaskedLM(config=_snake_case ) UpperCAmelCase_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase_ : List[str] = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : int = self.num_labels UpperCAmelCase_ : Tuple = TFDebertaVaForSequenceClassification(config=_snake_case ) UpperCAmelCase_ : Any = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase_ : List[str] = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Dict = self.num_labels UpperCAmelCase_ : Tuple = TFDebertaVaForTokenClassification(config=_snake_case ) UpperCAmelCase_ : Optional[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase_ : List[str] = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : List[str] = TFDebertaVaForQuestionAnswering(config=_snake_case ) UpperCAmelCase_ : Any = { "input_ids": input_ids, 
"attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase_ : List[str] = model(_snake_case ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : List[str] =( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __A : Optional[int] =( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) __A : Tuple =False __A : List[Any] =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = TFDebertaVaModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) self.assertIsNotNone(_snake_case ) @require_tf class _snake_case (unittest.TestCase): @unittest.skip(reason="Model not available yet" ) def UpperCamelCase__ ( self ): pass @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) UpperCAmelCase_ : int = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) UpperCAmelCase_ : List[Any] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase_ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case )[0] UpperCAmelCase_ : List[str] = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] ,_snake_case ,atol=1E-4 )
71
'''simple docstring''' from __future__ import annotations def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Optional[int] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(_SCREAMING_SNAKE_CASE ) if n > 1: factors.append(_SCREAMING_SNAKE_CASE ) return factors if __name__ == "__main__": import doctest doctest.testmod()
71
1
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" UpperCAmelCase_ : List[str] = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def a__ ( _SCREAMING_SNAKE_CASE : int = 1_00 ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : Any = 2 for i in range(2 , max_n + 1 ): UpperCAmelCase_ : Optional[int] = pre_numerator UpperCAmelCase_ : int = 2 * i // 3 if i % 3 == 0 else 1 UpperCAmelCase_ : Any = cur_numerator UpperCAmelCase_ : List[str] = e_cont * pre_numerator + temp return sum_digits(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(f"""{solution() = }""")
71
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCamelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ _lowerCamelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" _lowerCamelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _snake_case (datasets.Metric): def UpperCamelCase__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ), } ) ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case ) }
71
1
'''simple docstring''' import argparse from collections import defaultdict import yaml _lowerCamelCase = """docs/source/en/_toctree.yml""" def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = defaultdict(_SCREAMING_SNAKE_CASE ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase_ : Optional[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase_ : List[str] = [] for duplicate_key in duplicates: UpperCAmelCase_ : List[str] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(_SCREAMING_SNAKE_CASE ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : s["title"].lower() ) def a__ ( _SCREAMING_SNAKE_CASE : Tuple=False ) -> List[Any]: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f: UpperCAmelCase_ : int = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase_ : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase_ : Tuple = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase_ : List[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase_ : List[str] = api_doc[model_idx]["sections"] UpperCAmelCase_ : List[str] = [(idx, section) for idx, section in enumerate(_SCREAMING_SNAKE_CASE ) if "sections" in section] UpperCAmelCase_ : Optional[Any] = False for idx, modality_doc in modalities_docs: UpperCAmelCase_ : Dict = modality_doc["sections"] UpperCAmelCase_ : Optional[int] = 
clean_model_doc_toc(_SCREAMING_SNAKE_CASE ) if old_modality_doc != new_modality_doc: UpperCAmelCase_ : Union[str, Any] = True if overwrite: UpperCAmelCase_ : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase_ : List[str] = model_doc UpperCAmelCase_ : List[str] = api_doc with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _lowerCamelCase = parser.parse_args() check_model_doc(args.fix_and_overwrite)
71
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCamelCase = logging.getLogger(__name__) @dataclass class _snake_case : __A : str =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __A : Optional[str] =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."}) __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."}) @dataclass class _snake_case : __A : str =field( metadata={"help": "The input data dir. 
Should contain the .tsv files (or other data files) for the task."}) __A : Optional[str] =field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __A : Optional[int] =field( default=10_24 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_28 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __A : Optional[int] =field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."}) __A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."}) __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."}) __A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."}) __A : bool =field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) ) def a__ ( ) -> Any: """simple docstring""" UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses() check_output_dir(_SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCAmelCase_ : Dict = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_SCREAMING_SNAKE_CASE ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCAmelCase_ : Dict = SeqaSeqDataset # Get datasets 
UpperCAmelCase_ : Tuple = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) UpperCAmelCase_ : Dict = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCAmelCase_ : int = ( dataset_class( _SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCAmelCase_ : Optional[Any] = ( build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None ) UpperCAmelCase_ : List[str] = SeqaSeqTrainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) UpperCAmelCase_ : List[Any] = {} # Training if training_args.do_train: logger.info("*** Train ***" ) UpperCAmelCase_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCAmelCase_ : int = train_result.metrics UpperCAmelCase_ : Dict = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" ) UpperCAmelCase_ : Optional[Any] = data_args.n_val UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) if training_args.do_predict: logger.info("*** Predict ***" ) UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" ) UpperCAmelCase_ : List[str] = test_output.metrics UpperCAmelCase_ : int = data_args.n_test if trainer.is_world_process_zero(): UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 ) handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(_SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate: UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE ) write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) ) return 
all_metrics def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
71
1
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


_lowerCamelCase = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """CLAP feature extractor: converts raw mono audio into log-mel
    spectrogram ``input_features`` plus a per-sample ``is_longer`` flag.

    NOTE(review): the original block was unrunnable — every parameter
    shared one name (SyntaxError), ``__init__`` bound locals instead of
    the ``self.*`` attributes the other methods read, and the base class
    name was undefined. Names are restored from the in-file use sites
    (``self.fft_window_size``, ``self._np_extract_fbank_features``, …);
    class/method names without in-file callers follow the upstream
    transformers implementation — confirm against it.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of bins of the one-sided spectrum: n_fft // 2 + 1.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-style (used for "fusion") and Slaney-style.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the extractor config, dropping the large filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Return the (frames, mel) dB log-mel spectrogram of ``waveform``."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a shrunk copy of ``mel`` with three random chunks (front/middle/back)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate/pad one waveform to ``max_length`` samples and extract its mel features.

        Returns ``(input_mel, longer)`` where ``longer`` flags audio that
        exceeded ``max_length``.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'''data_truncating {truncation} not implemented''')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the
            # usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # NOTE(review): the obfuscated source spelled the dtype `np.floataa`;
        # float64 follows upstream — confirm against the original file.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
71
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _snake_case : __A : Dict =BlenderbotConfig __A : Union[str, Any] ={} __A : Any ="gelu" def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,): UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Dict = seq_length UpperCAmelCase_ : int = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : str = eos_token_id UpperCAmelCase_ : List[Any] = pad_token_id UpperCAmelCase_ : List[Any] = bos_token_id def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase_ : 
int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case ) return config, inputs_dict def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder() UpperCAmelCase_ : int = inputs_dict["input_ids"] UpperCAmelCase_ : Dict = input_ids[:1, :] UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :] UpperCAmelCase_ : int = inputs_dict["head_mask"] UpperCAmelCase_ : Optional[int] = 1 # first forward pass UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 ) UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0] UpperCAmelCase_ : List[Any] = model(_snake_case 
,attention_mask=_snake_case ,past_key_values=_snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 ) def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]: """simple docstring""" if attention_mask is None: UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase_ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] 
=(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __A : Dict =( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __A : Any =True __A : Dict =False __A : Dict =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_snake_case ) @require_tokenizers @require_tf class _snake_case (unittest.TestCase): __A : Optional[int] =["My friends are cool but they eat too many carbs."] __A : Optional[Any] ="facebook/blenderbot-400M-distill" @cached_property def UpperCamelCase__ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" ) UpperCAmelCase_ : Union[str, Any] = self.model.generate( model_inputs.input_ids ,) UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
71
1
'''simple docstring'''
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the given
    observations (Viterbi algorithm).

    NOTE(review): the original file named every function ``a__`` while the
    bodies called ``_validation``, ``_validate_list``, …, so the module
    raised NameError on first use; the names below are restored from
    those call sites.

    Raises ValueError (via ``_validation``) on empty or mistyped inputs.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all five Viterbi inputs, raising ValueError on the first problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any argument is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f'''{var_name} must be a list''')
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f'''{var_name} must be a list of strings''')


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables (flat dict + two nested dicts)."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check a dict of dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless ``_object`` is a str-keyed dict of ``value_type`` values."""
    if not isinstance(_object, dict):
        raise ValueError(f'''{var_name} must be a dict''')
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f'''{var_name} all keys must be strings''')
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f'''{var_name} {nested_text}all values must be {value_type.__name__}''')


if __name__ == "__main__":
    from doctest import testmod

    testmod()
71
'''simple docstring''' from numpy import exp, pi, sqrt def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> int: """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
71
1
'''simple docstring'''
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    """Shared checks for individual UNet down/mid/up blocks.

    Subclasses are expected to provide ``block_class`` and ``block_type``
    ("down", "mid" or "up"). NOTE(review): the original block duplicated
    every parameter name (SyntaxError) and dropped the dict-key / method
    names; names here are restored from the use sites
    (``self.get_dummy_input()``, ``self.dummy_input``,
    ``self.output_shape``) and upstream diffusers — confirm against it.
    """

    @property
    def dummy_input(self):
        # Default input: hidden states + time embedding.
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''')

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a seeded, reproducible input dict for the block under test."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            # Separate seed so the residual states differ from hidden_states.
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Run the block in eval mode and compare shape plus a corner slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        """Smoke-test a single training step (forward + MSE loss + backward)."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
71
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two parallel Transformer2DModels whose outputs are mixed.

    Each transformer encodes a different slice of the conditioning tokens;
    the two residuals are blended with ``mix_ratio``. NOTE(review): the
    original block duplicated every parameter name (SyntaxError), assigned
    locals instead of the ``self.*`` attributes the forward pass reads, and
    garbled the import names (`TransformeraDModel`); names are restored
    from the forward-pass reads and upstream diffusers — confirm there.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode both condition slices and blend the residuals by ``mix_ratio``."""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                hidden_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
71
1
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu _lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json""" with io.open(filename, """r""", encoding="""utf-8""") as f: _lowerCamelCase = json.load(f) @require_torch class _snake_case (unittest.TestCase): def UpperCamelCase__ ( self ,_snake_case ): return FSMTTokenizer.from_pretrained(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : int = FSMTForConditionalGeneration.from_pretrained(_snake_case ).to(_snake_case ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ] ) @slow def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality UpperCAmelCase_ : Dict = f'''facebook/wmt19-{pair}''' UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(_snake_case ) UpperCAmelCase_ : int = self.get_model(_snake_case ) UpperCAmelCase_ : Union[str, Any] = bleu_data[pair]["src"] UpperCAmelCase_ : Any = bleu_data[pair]["tgt"] UpperCAmelCase_ : List[Any] = tokenizer(_snake_case ,return_tensors="pt" ,truncation=_snake_case ,padding="longest" ).to(_snake_case ) UpperCAmelCase_ : List[str] = model.generate( input_ids=batch.input_ids ,num_beams=8 ,) UpperCAmelCase_ : List[str] = tokenizer.batch_decode( _snake_case ,skip_special_tokens=_snake_case ,clean_up_tokenization_spaces=_snake_case ) UpperCAmelCase_ : int = calculate_bleu(_snake_case ,_snake_case ) print(_snake_case ) self.assertGreaterEqual(scores["bleu"] ,_snake_case )
71
'''simple docstring''' import json import sys def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f: UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[Any] = results[benchmark_name] UpperCAmelCase_ : Any = benchmark_name.split("/" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) UpperCAmelCase_ : Any = "| metric |" UpperCAmelCase_ : Any = "|--------|" UpperCAmelCase_ : Union[str, Any] = "| new / old (diff) |" for metric_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = benchmark_res[metric_name] UpperCAmelCase_ : Union[str, Any] = metric_vals["new"] UpperCAmelCase_ : Optional[Any] = metric_vals.get("old" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = F''' {new_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": _lowerCamelCase = sys.argv[1] _lowerCamelCase = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
71
1
'''simple docstring''' def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: """simple docstring""" if index == r: for j in range(_SCREAMING_SNAKE_CASE ): print(data[j] , end=" " ) print(" " ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location UpperCAmelCase_ : Union[str, Any] = arr[i] combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 , _SCREAMING_SNAKE_CASE , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [0] * r # Print all combination using temporary array 'data[]' combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , 0 ) if __name__ == "__main__": # Driver code to check the function above _lowerCamelCase = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
71
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[int] =DebertaVaTokenizer __A : Union[str, Any] =DebertaVaTokenizerFast __A : str =True __A : List[str] =True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = "this is a test" UpperCAmelCase_ : Optional[Any] = "this is a test" return input_text, output_text def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = "<pad>" UpperCAmelCase_ : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"[PAD]" ) self.assertEqual(len(_snake_case ) ,3_00_01 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = "This is a test" UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89] UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case 
,_snake_case ) # fmt: off UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,) @slow def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 
4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
71
1
"""Graph m-coloring via backtracking."""


def valid_coloring(neighbours, colored_vertices, color):
    """Return True if ``color`` may be assigned to the vertex whose adjacency
    row is ``neighbours`` -- i.e. no already-colored neighbour uses it.

    Args:
        neighbours: 0/1 adjacency row for the vertex under consideration.
        colored_vertices: current color per vertex (-1 means uncolored).
        color: candidate color.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    """Try to color ``graph`` from vertex ``index`` onward; True on success.

    Mutates ``colored_vertices`` in place, backtracking (resetting to -1)
    whenever a partial assignment cannot be extended.
    """
    # Base case: every vertex has been assigned a color.
    if index == len(graph):
        return True
    for candidate in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, candidate):
            # Color current vertex, then recurse on the rest.
            colored_vertices[index] = candidate
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack: undo the tentative assignment.
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    """Return a valid coloring (list of color indices, one per vertex) of
    ``graph`` using at most ``max_colors`` colors, or [] if none exists.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
71
"""Position of the most significant set bit of a non-negative integer."""


def a__(number: int) -> int:
    """Return the 1-based position of the highest set bit of ``number``.

    Equivalent to ``number.bit_length()``; returns 0 when no bit is set.

    >>> a__(0)
    0
    >>> a__(8)
    4

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative (the original looped forever,
            because ``>>`` on a negative int never reaches 0).
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
1
'''simple docstring'''
# Integration tests for the "text-question-answering" tool, exercising both
# the local tool and its remote-endpoint variant, with positional and
# keyword call styles.
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin

# Context passage the question-answering tool is queried against.
_lowerCamelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class _snake_case (unittest.TestCase , __SCREAMING_SNAKE_CASE):
    # NOTE(review): this file appears machine-obfuscated. The assignments below
    # bind to throwaway names (``UpperCAmelCase_``) rather than ``self.tool`` /
    # ``self.remote_tool``, and later methods reference ``_snake_case``, which
    # is undefined at module scope -- presumably the original passed the text
    # constant above. Verify against the upstream source before trusting these
    # tests to run.
    def UpperCamelCase__ ( self ):
        # setUp-style initialisation: load local and remote tool variants.
        UpperCAmelCase_ : Any = load_tool("text-question-answering" )
        self.tool.setup()
        UpperCAmelCase_ : Any = load_tool("text-question-answering" ,remote=_snake_case )

    def UpperCamelCase__ ( self ):
        # Positional-argument call against the local tool.
        UpperCAmelCase_ : str = self.tool(_snake_case ,"What did Hugging Face do in April 2021?" )
        self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )

    def UpperCamelCase__ ( self ):
        # Positional-argument call against the remote tool.
        UpperCAmelCase_ : List[str] = self.remote_tool(_snake_case ,"What did Hugging Face do in April 2021?" )
        self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )

    def UpperCamelCase__ ( self ):
        # Keyword-argument call against the local tool.
        UpperCAmelCase_ : Union[str, Any] = self.tool(text=_snake_case ,question="What did Hugging Face do in April 2021?" )
        self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )

    def UpperCamelCase__ ( self ):
        # Keyword-argument call against the remote tool.
        UpperCAmelCase_ : Optional[int] = self.remote_tool(text=_snake_case ,question="What did Hugging Face do in April 2021?" )
        self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )
71
"""Binomial coefficient (n choose k) with a small demo driver."""
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the number of ways to choose ``k`` items from ``n`` (n choose k).

    Computed as n! / (k! * (n - k)!) with integer arithmetic.

    Raises:
        ValueError: if ``k`` is negative or greater than ``n``.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


# Backward-compatible alias for the obfuscated original name.
a__ = combinations

if __name__ == "__main__":
    print(
        """The number of five-card hands possible from a standard""",
        f"""fifty-two card deck is: {combinations(52, 5)}\n""",
    )
    print(
        """If a class of 40 students must be arranged into groups of""",
        f"""4 for group projects, there are {combinations(40, 4)} ways""",
        """to arrange them.\n""",
    )
    print(
        """If 10 teams are competing in a Formula One race, there""",
        f"""are {combinations(10, 3)} ways that first, second and""",
        """third place can be awarded.""",
    )
71
1
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _snake_case (__SCREAMING_SNAKE_CASE): __A : torch.FloatTensor __A : Optional[torch.FloatTensor] =None def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int]=0.999 , _SCREAMING_SNAKE_CASE : List[Any]="cosine" , ) -> Union[str, Any]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) UpperCAmelCase_ : List[str] = [] for i in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = i / num_diffusion_timesteps UpperCAmelCase_ : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @register_to_config def __init__( self ,_snake_case = 10_00 ,_snake_case = "fixed_small_log" ,_snake_case = True ,_snake_case = 1.0 ,_snake_case = "epsilon" ,_snake_case = "squaredcos_cap_v2" ,): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) UpperCAmelCase_ : Optional[Any] = betas_for_alpha_bar(_snake_case ) UpperCAmelCase_ : Union[str, Any] = 1.0 - self.betas UpperCAmelCase_ : int = torch.cumprod(self.alphas ,dim=0 ) 
UpperCAmelCase_ : List[str] = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCAmelCase_ : int = 1.0 # setable values UpperCAmelCase_ : Any = None UpperCAmelCase_ : Union[str, Any] = torch.from_numpy(np.arange(0 ,_snake_case )[::-1].copy() ) UpperCAmelCase_ : Optional[Any] = variance_type def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): return sample def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): UpperCAmelCase_ : Optional[Any] = num_inference_steps UpperCAmelCase_ : Optional[Any] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCAmelCase_ : Tuple = (np.arange(0 ,_snake_case ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCAmelCase_ : Tuple = torch.from_numpy(_snake_case ).to(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ): if prev_timestep is None: UpperCAmelCase_ : Any = t - 1 UpperCAmelCase_ : Tuple = self.alphas_cumprod[t] UpperCAmelCase_ : List[str] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase_ : Tuple = 1 - alpha_prod_t UpperCAmelCase_ : Optional[Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase_ : Any = self.betas[t] else: UpperCAmelCase_ : Optional[int] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase_ : List[str] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCAmelCase_ : Dict = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCAmelCase_ : int = torch.log(torch.clamp(_snake_case ,min=1E-20 ) ) UpperCAmelCase_ : List[str] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference 
with DDPM scheduler UpperCAmelCase_ : Optional[Any] = variance.log() UpperCAmelCase_ : Union[str, Any] = beta.log() UpperCAmelCase_ : Dict = (predicted_variance + 1) / 2 UpperCAmelCase_ : List[str] = frac * max_log + (1 - frac) * min_log return variance def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case=None ,_snake_case = True ,): UpperCAmelCase_ : int = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCAmelCase_ , UpperCAmelCase_ : Any = torch.split(_snake_case ,sample.shape[1] ,dim=1 ) else: UpperCAmelCase_ : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: UpperCAmelCase_ : Optional[int] = t - 1 UpperCAmelCase_ : int = self.alphas_cumprod[t] UpperCAmelCase_ : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase_ : Dict = 1 - alpha_prod_t UpperCAmelCase_ : Dict = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase_ : List[str] = self.betas[t] UpperCAmelCase_ : int = self.alphas[t] else: UpperCAmelCase_ : Optional[int] = 1 - alpha_prod_t / alpha_prod_t_prev UpperCAmelCase_ : List[str] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase_ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase_ : Optional[int] = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase_ : Dict = torch.clamp( _snake_case ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_ : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCAmelCase_ : List[str] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_ : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCAmelCase_ : Union[str, Any] = 0 if t > 0: UpperCAmelCase_ : Optional[Any] = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=_snake_case ,device=model_output.device ) UpperCAmelCase_ : Any = self._get_variance( _snake_case ,predicted_variance=_snake_case ,prev_timestep=_snake_case ,) if self.variance_type == "fixed_small_log": UpperCAmelCase_ : Union[str, Any] = variance elif self.variance_type == "learned_range": UpperCAmelCase_ : List[str] = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." 
) UpperCAmelCase_ : List[Any] = variance * variance_noise UpperCAmelCase_ : Tuple = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=_snake_case ,pred_original_sample=_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples UpperCAmelCase_ : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) UpperCAmelCase_ : str = timesteps.to(original_samples.device ) UpperCAmelCase_ : Dict = alphas_cumprod[timesteps] ** 0.5 UpperCAmelCase_ : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase_ : Dict = sqrt_alpha_prod.unsqueeze(-1 ) UpperCAmelCase_ : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCAmelCase_ : Optional[int] = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCAmelCase_ : Optional[Any] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
71
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] =VideoToVideoSDPipeline __A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"} __A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"} __A : str =PipelineTesterMixin.required_optional_params - {"latents"} __A : Dict =False # No `output_type`. 
__A : Optional[int] =frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ]) def UpperCamelCase__ ( self ): torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,) UpperCAmelCase_ : int = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) UpperCAmelCase_ : Dict = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,) torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ : Optional[int] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ): # 3 frames UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case ) if str(_snake_case ).startswith("mps" ): UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case 
).manual_seed(_snake_case ) UpperCAmelCase_ : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.get_dummy_components() UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case ) UpperCAmelCase_ : int = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : str = "np" UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,) def UpperCamelCase__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." 
) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _snake_case (unittest.TestCase): def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case ) UpperCAmelCase_ : List[Any] = video.to("cuda" ) UpperCAmelCase_ : List[Any] = "Spiderman is surfing" UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
71
1
"""Tests for the text2text-generation pipeline (encoder-decoder models)."""
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TextaTextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class _snake_case(unittest.TestCase):
    """Pipeline tests for text2text generation.

    NOTE(review): the original block assigned both mappings to the same
    class attribute and gave every method the same name, so earlier
    definitions were shadowed and unittest collected no tests at all.
    Identifiers below are restored from the read sites in the bodies and
    from the pipeline-test mixin conventions — confirm against the mixin.
    """

    # Mappings consumed by the pipeline test mixin.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline for the mixin; returns (pipeline, example inputs)."""
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        """Generic checks run by the mixin against every supported model."""
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        # Same call, but batched.
        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        # Non-string input must be rejected.
        # NOTE(review): exception type inferred from upstream test — confirm.
        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        # Batched tensor generation needs a pad token on the tokenizer.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
71
"""Integration tests for the `datasets` inspection helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

# pytest magic name: marks every test in this module as `integration`.
# (The original bound the marker to a dead variable, so it never applied.)
pytestmark = pytest.mark.integration


# NOTE(review): the original functions were all named `a__` (never collected
# by pytest) and declared duplicate `_SCREAMING_SNAKE_CASE` parameters, which
# is a SyntaxError. Parameter names below are restored to match the
# `@pytest.mark.parametrize` argnames; the second argument of the inspect_*
# tests is presumably pytest's `tmp_path` fixture (it is passed to os.listdir)
# — confirm against the caller.


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # config_name=None for a multi-config dataset must raise.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
71
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[int] =DebertaVaTokenizer __A : Union[str, Any] =DebertaVaTokenizerFast __A : str =True __A : List[str] =True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = "this is a test" UpperCAmelCase_ : Optional[Any] = "this is a test" return input_text, output_text def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = "<pad>" UpperCAmelCase_ : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"[PAD]" ) self.assertEqual(len(_snake_case ) ,3_00_01 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? 
" UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? " UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case ) UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = "This is a test" UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89] UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case 
,_snake_case ) # fmt: off UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé." UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,) @slow def UpperCamelCase__ ( self ): # fmt: off UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 
4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
71
"""Tests for CLIPProcessor (tokenizer + image processor pairing)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class _snake_case(unittest.TestCase):
    """Round-trip and consistency tests for CLIPProcessor.

    NOTE(review): in the original block every helper was named
    `UpperCamelCase__` while the test bodies called `self.get_tokenizer()`,
    `self.get_rust_tokenizer()`, `self.get_image_processor()` and
    `self.prepare_image_inputs()` — names that did not exist — and no
    `setUp` ever created `self.tmpdirname`. Names below are restored from
    those call sites and from unittest conventions.
    """

    def setUp(self):
        # Write a tiny BPE vocab + image-processor config into a temp dir
        # so `from_pretrained(self.tmpdirname)` works offline.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        # Original had `np.uinta`, which is not a NumPy dtype; uint8 intended.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        # Processor must delegate image handling to the image processor.
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        # NOTE(review): exception type inferred from upstream test — confirm.
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
71
1
"""SentencePiece-backed tokenizer for BertGeneration-style checkpoints."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

# NOTE(review): the original bound every module constant (and the logger) to
# `_lowerCamelCase`, while the class body reads VOCAB_FILES_NAMES / logger;
# names are restored from those read sites.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class _snake_case(PreTrainedTokenizer):
    """Tokenizer wrapping a SentencePiece model.

    NOTE(review): the original subclassed the undefined name
    `__SCREAMING_SNAKE_CASE` and gave every hook method the same name
    (`UpperCamelCase__`), so the `PreTrainedTokenizer` dispatch contract
    (`_tokenize`, `_convert_token_to_id`, ...) was unmet. Hook names below
    are the canonical ones required by the base class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload
        # from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) back to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # vocab_file points at nothing on disk (e.g. restored from state):
            # dump the in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
71
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Any =AudioLDMPipeline __A : Dict =TEXT_TO_AUDIO_PARAMS __A : Any =TEXT_TO_AUDIO_BATCH_PARAMS __A : Tuple =frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ]) def UpperCamelCase__ ( self ): torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,) UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = ClapTextConfig( 
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,) UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case ) UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 ) UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,) UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case ) UpperCAmelCase_ : Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ): if str(_snake_case ).startswith("mps" ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case ) else: UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) UpperCAmelCase_ : Any = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case ) UpperCAmelCase_ : Dict = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 2_56 UpperCAmelCase_ : Any = audio[:10] UpperCAmelCase_ : Any = 
np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = self.get_dummy_components() UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case ) UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]] # forward UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case ) UpperCAmelCase_ : List[str] = output.audios[0] UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )] UpperCAmelCase_ : str = audioldm_pipe.tokenizer( _snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,) UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case ) UpperCAmelCase_ : str = audioldm_pipe.text_encoder( _snake_case ,) UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 ) UpperCAmelCase_ : int = prompt_embeds # forward UpperCAmelCase_ : int = audioldm_pipe(**_snake_case ) UpperCAmelCase_ : List[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.get_dummy_components() UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case ) UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"] 
UpperCAmelCase_ : Any = negative_prompt UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]] # forward UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case ) UpperCAmelCase_ : Dict = output.audios[0] UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )] UpperCAmelCase_ : List[Any] = [] for p in [prompt, negative_prompt]: UpperCAmelCase_ : Any = audioldm_pipe.tokenizer( _snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,) UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case ) UpperCAmelCase_ : str = audioldm_pipe.text_encoder( _snake_case ,) UpperCAmelCase_ : List[Any] = text_embeds.text_embeds # additional L_2 normalization over each hidden-state UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 ) embeds.append(_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds # forward UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case ) UpperCAmelCase_ : Any = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Optional[Any] = self.get_dummy_components() UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case ) UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : int = "egg cracking" UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case ) UpperCAmelCase_ : int = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 2_56 UpperCAmelCase_ : List[Any] = audio[:10] UpperCAmelCase_ : Any = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 
0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : List[str] = self.get_dummy_components() UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case ) UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios assert audios.shape == (1, 2_56) # test num_waveforms_per_prompt=1 (default) for batch of prompts UpperCAmelCase_ : List[str] = 2 UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_56) # test num_waveforms_per_prompt for single prompt UpperCAmelCase_ : List[str] = 2 UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios assert audios.shape == (num_waveforms_per_prompt, 2_56) # test num_waveforms_per_prompt for batch of prompts UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Optional[int] = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56) def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Optional[Any] = self.get_dummy_components() UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate 
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case ) UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case ) UpperCAmelCase_ : str = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) / vocoder_sampling_rate == 0.016 UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case ) UpperCAmelCase_ : Any = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) / vocoder_sampling_rate == 0.032 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = self.get_dummy_components() UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case ) UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : int = ["hey"] UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 ) UpperCAmelCase_ : Any = output.audios.shape assert audio_shape == (1, 2_56) UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config config.model_in_dim *= 2 UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case ) UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 ) UpperCAmelCase_ : int = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_56) def UpperCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case ) def UpperCamelCase__ ( self ): self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,) def UpperCamelCase__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ) @slow class _snake_case (unittest.TestCase): def UpperCamelCase__ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( 
self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ): UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) ) UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case ) UpperCAmelCase_ : List[str] = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case ) UpperCAmelCase_ : List[Any] = 25 UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 8_19_20 UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40] UpperCAmelCase_ : Any = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case ) UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 8_19_20 UpperCAmelCase_ : Any = audio[2_77_80:2_77_90] UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) 
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
71
1
'''simple docstring''' from graphs.minimum_spanning_tree_kruskal import kruskal def a__ ( ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[str] = 9 UpperCAmelCase_ : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] UpperCAmelCase_ : int = kruskal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_SCREAMING_SNAKE_CASE ) == sorted(_SCREAMING_SNAKE_CASE )
71
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase = { """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
71
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _snake_case : def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=30 ,_snake_case=2 ,_snake_case=3 ,_snake_case=True ,_snake_case=True ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=10 ,_snake_case=0.02 ,_snake_case=3 ,_snake_case=None ,_snake_case=2 ,): UpperCAmelCase_ : Any = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : List[Any] = patch_size UpperCAmelCase_ : Optional[int] = num_channels UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : Optional[int] = use_labels UpperCAmelCase_ : List[Any] = hidden_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : Dict = scope UpperCAmelCase_ : Optional[int] 
= encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCAmelCase_ : Union[str, Any] = (image_size // patch_size) ** 2 UpperCAmelCase_ : Any = num_patches + 2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Dict = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Union[str, Any] = TFDeiTModel(config=_snake_case ) UpperCAmelCase_ : Dict = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Any = TFDeiTForMaskedImageModeling(config=_snake_case ) UpperCAmelCase_ : Tuple = model(_snake_case ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : int = 1 UpperCAmelCase_ : Union[str, Any] = TFDeiTForMaskedImageModeling(_snake_case ) UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) 
UpperCAmelCase_ : Tuple = model(_snake_case ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Dict = self.type_sequence_label_size UpperCAmelCase_ : Dict = TFDeiTForImageClassification(_snake_case ) UpperCAmelCase_ : List[Any] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Union[str, Any] = TFDeiTForImageClassification(_snake_case ) UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Any = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs UpperCAmelCase_ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : str =( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __A : Union[str, Any] =( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __A : Any =False __A : Dict =False __A : Tuple =False __A : List[str] =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = TFDeiTModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use 
inputs_embeds" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[int] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,tf.keras.layers.Dense ) ) def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(_snake_case ) UpperCAmelCase_ : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Tuple = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=False ): UpperCAmelCase_ : Union[str, Any] = super()._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def UpperCamelCase__ ( self ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: 
UpperCAmelCase_ : str = TFDeiTModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def a__ ( ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _snake_case (unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : Tuple = image_processor(images=_snake_case ,return_tensors="tf" ) # forward pass UpperCAmelCase_ : List[str] = model(**_snake_case ) # verify the logits UpperCAmelCase_ : List[Any] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_snake_case ) UpperCAmelCase_ : str = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1E-4 ) )
71
'''simple docstring''' import heapq def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]: """simple docstring""" UpperCAmelCase_ : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] ) # chosen_vertices = set of chosen vertices UpperCAmelCase_ : Optional[int] = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0] chosen_vertices.add(_SCREAMING_SNAKE_CASE ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(_SCREAMING_SNAKE_CASE ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() _lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
71
1