Dataset schema (one field per row in the examples below):

  code                     string   length 86 to 54.5k
  code_codestyle           int64    values 0 to 371
  style_context            string   length 87 to 49.2k
  style_context_codestyle  int64    values 0 to 349
  label                    int64    0 or 1
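Each example pairs a `code` string with a `style_context` string, each tagged with a codestyle id; in the rows below, `label` is 1 when the two ids match and 0 when they differ. A minimal sketch of inspecting examples with this schema via the Hugging Face `datasets` library; the repository id `user/code-style-pairs` is a placeholder, not the dataset's actual path.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the dataset's real path.
ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(3)):
    # In the rows shown below, label == 1 whenever code_codestyle equals
    # style_context_codestyle, and 0 when the two style ids differ.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
```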
Row 1
code:
from __future__ import annotations

def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]

if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 20
style_context:
import os

import numpy
import onnx

def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
    lowercase : int = a.name
    lowercase : Any = b.name
    lowercase : Optional[Any] = """"""
    lowercase : Dict = """"""
    lowercase : int = a == b
    lowercase : int = name_a
    lowercase : List[str] = name_b
    return res

def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        _graph_replace_input_with(node_proto.attribute[1].g , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
    for n in graph_proto.node:
        _node_replace_input_with(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
    lowercase : Any = list(model.graph.initializer )
    lowercase : Dict = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        lowercase : Union[str, Any] = inits[i].name
        lowercase : Dict = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[str]:
    lowercase : Union[str, Any] = os.path.dirname(SCREAMING_SNAKE_CASE__ )
    lowercase : Dict = os.path.basename(SCREAMING_SNAKE_CASE__ )
    lowercase : str = onnx.load(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
    lowercase : List[str] = list(model.graph.initializer )
    lowercase : Tuple = set()
    lowercase : int = {}
    lowercase : Optional[Any] = []
    lowercase : Dict = 0
    for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(SCREAMING_SNAKE_CASE__ )
                dup_set.add(SCREAMING_SNAKE_CASE__ )
                lowercase : int = inits[j].data_type
                lowercase : Optional[int] = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("""unexpected data type: """ , SCREAMING_SNAKE_CASE__ )
                total_reduced_size += mem_size
                lowercase : Tuple = inits[i].name
                lowercase : int = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(SCREAMING_SNAKE_CASE__ )
                else:
                    lowercase : List[str] = [name_j]
                ind_to_replace.append((j, i) )
    print("""total reduced size: """ , total_reduced_size / 1_024 / 1_024 / 1_024 , """GB""" )
    lowercase : str = sorted(SCREAMING_SNAKE_CASE__ )
    _remove_dup_initializers_from_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    lowercase : Optional[Any] = """optimized_""" + model_file_name
    lowercase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    onnx.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return new_model
style_context_codestyle: 20
label: 1
Row 2
code:
'''simple docstring'''

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    UpperCAmelCase_ = None

UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

UpperCAmelCase_ = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

UpperCAmelCase_ = {
    'camembert-base': 5_1_2,
}

UpperCAmelCase_ = '▁'

class lowerCAmelCase_ ( lowerCamelCase_ ):
    '''simple docstring'''

    lowerCAmelCase_ : str = VOCAB_FILES_NAMES
    lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ : Optional[int] = ["""input_ids""", """attention_mask"""]
    lowerCAmelCase_ : Tuple = CamembertTokenizer

    def __init__( self : Any , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Optional[int]="</s>" , _UpperCAmelCase : Optional[int]="</s>" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : List[str]="<unk>" , _UpperCAmelCase : Dict="<pad>" , _UpperCAmelCase : str="<mask>" , _UpperCAmelCase : Any=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase : Optional[int] , ):
        """simple docstring"""
        UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
        super().__init__(
            _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
        UpperCAmelCase__ = vocab_file
        UpperCAmelCase__ = False if not self.vocab_file else True

    def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase__ = [self.cls_token_id]
        UpperCAmelCase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
        """simple docstring"""
        UpperCAmelCase__ = [self.sep_token_id]
        UpperCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(_UpperCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCAmelCase__ = os.path.join(
            _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
            copyfile(self.vocab_file , _UpperCAmelCase )
        return (out_vocab_file,)
code_codestyle: 61
style_context:
'''simple docstring'''

def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : set ):
    '''simple docstring'''
    UpperCAmelCase__ , UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ), len(grid[0] )
    if (
        min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    UpperCAmelCase__ = 0
    count += depth_first_search(SCREAMING_SNAKE_CASE__ , row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    count += depth_first_search(SCREAMING_SNAKE_CASE__ , row - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    count += depth_first_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__ )
    count += depth_first_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , col - 1 , SCREAMING_SNAKE_CASE__ )
    visit.remove((row, col) )
    return count

if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 61
label: 1
Row 3
code:
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

A : int = {
    'configuration_clip': [
        'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPConfig',
        'CLIPOnnxConfig',
        'CLIPTextConfig',
        'CLIPVisionConfig',
    ],
    'processing_clip': ['CLIPProcessor'],
    'tokenization_clip': ['CLIPTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : Optional[int] = ['CLIPTokenizerFast']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : int = ['CLIPFeatureExtractor']
    A : Dict = ['CLIPImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : Tuple = [
        'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPModel',
        'CLIPPreTrainedModel',
        'CLIPTextModel',
        'CLIPTextModelWithProjection',
        'CLIPVisionModel',
        'CLIPVisionModelWithProjection',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : List[Any] = [
        'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFCLIPModel',
        'TFCLIPPreTrainedModel',
        'TFCLIPTextModel',
        'TFCLIPVisionModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : List[str] = [
        'FlaxCLIPModel',
        'FlaxCLIPPreTrainedModel',
        'FlaxCLIPTextModel',
        'FlaxCLIPTextPreTrainedModel',
        'FlaxCLIPVisionModel',
        'FlaxCLIPVisionPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 6
style_context:
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate

@dataclass(frozen=a )
class __A( a ):
    snake_case_ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    snake_case_ = Features({'''text''': Value('''string''' )} )
    snake_case_ = Features({} )
    snake_case_ = "text"

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
style_context_codestyle: 6
label: 1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase: int = logging.get_logger(__name__) _UpperCamelCase: Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model'} _UpperCamelCase: List[Any] = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } _UpperCamelCase: Union[str, Any] = { 'camembert-base': 5_1_2, } _UpperCamelCase: Any = '▁' class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['input_ids', 'attention_mask'] def __init__( self : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : int="<s>", lowerCAmelCase : Optional[int]="</s>", lowerCAmelCase : Union[str, Any]="</s>", lowerCAmelCase : Union[str, Any]="<s>", lowerCAmelCase : List[str]="<unk>", lowerCAmelCase : Union[str, Any]="<pad>", lowerCAmelCase : Dict="<mask>", lowerCAmelCase : Any=["<s>NOTUSED", "</s>NOTUSED"], lowerCAmelCase : Optional[Dict[str, Any]] = None, **lowerCAmelCase : int, ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowercase : Union[str, Any] = AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else mask_token lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, sep_token=lowerCAmelCase, cls_token=lowerCAmelCase, pad_token=lowerCAmelCase, mask_token=lowerCAmelCase, additional_special_tokens=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, ) lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCAmelCase ) ) lowercase : Union[str, Any] = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowercase : int = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} lowercase : Optional[int] = len(self.fairseq_tokens_to_ids ) lowercase : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowercase ( self : Tuple, lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase : List[str] = [self.cls_token_id] lowercase : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase ( self : List[Any], lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None, lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1] def lowercase ( self : int, lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: lowercase : str = [self.sep_token_id] lowercase : Any = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase ( self : Optional[int] ) -> List[str]: return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def lowercase ( self : Optional[int] ) -> str: lowercase : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase ( self : List[str], lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase ) def lowercase ( self : Any, lowerCAmelCase : Optional[Any] ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(lowerCAmelCase ) def lowercase ( self : Tuple, lowerCAmelCase : Dict ) -> List[str]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase ( self : Optional[int], lowerCAmelCase : Tuple ) -> List[str]: lowercase : Tuple = [] lowercase : Optional[int] = '' lowercase : List[str] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase ) + token lowercase : Any = True lowercase : Union[str, Any] = [] else: current_sub_tokens.append(lowerCAmelCase ) lowercase : str = False out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def __getstate__( self : int ) -> Optional[int]: lowercase : List[Any] = self.__dict__.copy() lowercase : Tuple = None return state def __setstate__( self : str, lowerCAmelCase : List[Any] ) -> Any: lowercase : Optional[int] = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): lowercase : str = {} lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase ( self : List[Any], lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : Dict = os.path.join( lowerCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase, 'wb' ) as fi: lowercase : int = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
code_codestyle: 53
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( _UpperCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase : List[Any] = np.inf def set_batch_size(_UpperCAmelCase ) -> None: nonlocal batch_size if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase : Any = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase : Dict = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and feature.dtype == "binary": lowercase : int = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_UpperCAmelCase , _UpperCAmelCase ) return None if batch_size is np.inf else batch_size class a__ ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Union[str, Any], lowerCAmelCase : NestedDataStructureLike[PathLike], lowerCAmelCase : Optional[NamedSplit] = None, lowerCAmelCase : Optional[Features] = None, lowerCAmelCase : str = None, lowerCAmelCase : bool = False, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[int] = None, **lowerCAmelCase : int, ) -> List[Any]: super().__init__( lowerCAmelCase, split=lowerCAmelCase, features=lowerCAmelCase, cache_dir=lowerCAmelCase, keep_in_memory=lowerCAmelCase, streaming=lowerCAmelCase, num_proc=lowerCAmelCase, **lowerCAmelCase, ) lowercase : str = path_or_paths if isinstance(lowerCAmelCase, lowerCAmelCase ) else {self.split: path_or_paths} lowercase : Tuple = _PACKAGED_DATASETS_MODULES['parquet'][1] lowercase : Optional[int] = Parquet( cache_dir=lowerCAmelCase, data_files=lowerCAmelCase, features=lowerCAmelCase, hash=lowerCAmelCase, **lowerCAmelCase, ) def lowercase ( self : Optional[int] ) -> Union[str, Any]: # Build iterable dataset if self.streaming: lowercase : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase : Tuple = None lowercase : Union[str, Any] = None lowercase : List[Any] = None lowercase : int = None self.builder.download_and_prepare( download_config=lowerCAmelCase, download_mode=lowerCAmelCase, verification_mode=lowerCAmelCase, base_path=lowerCAmelCase, num_proc=self.num_proc, ) lowercase : Any = self.builder.as_dataset( split=self.split, verification_mode=lowerCAmelCase, in_memory=self.keep_in_memory ) return dataset class a__ : def __init__( self : Dict, lowerCAmelCase : Dataset, lowerCAmelCase : Union[PathLike, BinaryIO], lowerCAmelCase : Optional[int] = None, **lowerCAmelCase : Optional[Any], ) -> Optional[Any]: lowercase : List[Any] = dataset lowercase : int = path_or_buf lowercase : Optional[Any] = batch_size or get_writer_batch_size(dataset.features ) lowercase : Optional[Any] = parquet_writer_kwargs def lowercase ( self : Union[str, Any] ) -> int: lowercase : Union[str, Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ): with open(self.path_or_buf, 'wb+' ) as buffer: lowercase : int = 
self._write(file_obj=lowerCAmelCase, batch_size=lowerCAmelCase, **self.parquet_writer_kwargs ) else: lowercase : List[Any] = self._write(file_obj=self.path_or_buf, batch_size=lowerCAmelCase, **self.parquet_writer_kwargs ) return written def lowercase ( self : int, lowerCAmelCase : BinaryIO, lowerCAmelCase : int, **lowerCAmelCase : Union[str, Any] ) -> int: lowercase : Optional[Any] = 0 lowercase : int = parquet_writer_kwargs.pop('path_or_buf', lowerCAmelCase ) lowercase : List[str] = self.dataset.features.arrow_schema lowercase : int = pq.ParquetWriter(lowerCAmelCase, schema=lowerCAmelCase, **lowerCAmelCase ) for offset in logging.tqdm( range(0, len(self.dataset ), lowerCAmelCase ), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ): lowercase : Tuple = query_table( table=self.dataset._data, key=slice(lowerCAmelCase, offset + batch_size ), indices=self.dataset._indices if self.dataset._indices is not None else None, ) writer.write_table(lowerCAmelCase ) written += batch.nbytes writer.close() return written
style_context_codestyle: 53
label: 1
Row 5
code:
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer

def _a ( lowerCamelCase: str ) -> str:
    '''simple docstring'''
    __A = args.pruning_method
    __A = args.threshold
    __A = args.model_name_or_path.rstrip('''/''' )
    __A = args.target_model_path
    print(F"""Load fine-pruned model from {model_name_or_path}""" )
    __A = torch.load(os.path.join(lowerCamelCase , '''pytorch_model.bin''' ) )
    __A = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            __A = tensor
            print(F"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            __A = tensor
            print(F"""Copied layer {name}""" )
        elif "bias" in name:
            __A = tensor
            print(F"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                __A = MagnitudeBinarizer.apply(inputs=lowerCamelCase , threshold=lowerCamelCase )
                __A = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F"""{prefix_}mask_scores"""]
                __A = TopKBinarizer.apply(lowerCamelCase , lowerCamelCase )
                __A = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F"""{prefix_}mask_scores"""]
                __A = ThresholdBinarizer.apply(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                __A = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F"""{prefix_}mask_scores"""]
                __A , __A = -0.1, 1.1
                __A = torch.sigmoid(lowerCamelCase )
                __A = s * (r - l) + l
                __A = s_bar.clamp(min=0.0 , max=1.0 )
                __A = tensor * mask
                print(F"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        __A = os.path.join(
            os.path.dirname(lowerCamelCase ) , F"""bertarized_{os.path.basename(lowerCamelCase )}""" )
    if not os.path.isdir(lowerCamelCase ):
        shutil.copytree(lowerCamelCase , lowerCamelCase )
        print(F"""\nCreated folder {target_model_path}""" )
    torch.save(lowerCamelCase , os.path.join(lowerCamelCase , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )

if __name__ == "__main__":
    snake_case__ : str = argparse.ArgumentParser()
    parser.add_argument(
        '--pruning_method',
        choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
        type=str,
        required=True,
        help=(
            'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
            ' sigmoied_threshold = Soft movement pruning)'
        ),
    )
    parser.add_argument(
        '--threshold',
        type=float,
        required=False,
        help=(
            'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
            'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
            'Not needed for `l0`'
        ),
    )
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        required=True,
        help='Folder containing the model that was previously fine-pruned',
    )
    parser.add_argument(
        '--target_model_path',
        default=None,
        type=str,
        required=False,
        help='Folder containing the model that was previously fine-pruned',
    )
    snake_case__ : Optional[int] = parser.parse_args()

    main(args)
code_codestyle: 117
style_context:
import argparse

import torch

from transformers import BertForMaskedLM

if __name__ == "__main__":
    snake_case__ : Optional[int] = argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='bert', choices=['bert'])
    parser.add_argument('--model_name', default='bert-base-uncased', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    snake_case__ : Optional[int] = parser.parse_args()

    if args.model_type == "bert":
        snake_case__ : Dict = BertForMaskedLM.from_pretrained(args.model_name)
        snake_case__ : Union[str, Any] = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    snake_case__ : Optional[int] = model.state_dict()
    snake_case__ : List[Any] = {}

    for w in ["word_embeddings", "position_embeddings"]:
        snake_case__ : Tuple = state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ["weight", "bias"]:
        snake_case__ : Optional[Any] = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']

    snake_case__ : int = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            snake_case__ : Union[str, Any] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
            ]
            snake_case__ : Dict = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
            ]
            snake_case__ : int = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
            ]
            snake_case__ : int = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
            ]
            snake_case__ : Optional[int] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
            ]
            snake_case__ : Optional[Any] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
            ]
            snake_case__ : List[str] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
            ]
            snake_case__ : int = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
            ]
        std_idx += 1

    snake_case__ : Optional[int] = state_dict['cls.predictions.decoder.weight']
    snake_case__ : str = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            snake_case__ : int = state_dict[f'cls.predictions.transform.dense.{w}']
            snake_case__ : Optional[int] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']

    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
style_context_codestyle: 117
label: 1
Row 6
code:
'''simple docstring'''

from math import factorial

def _a( UpperCamelCase__ : int = 1_0_0 ):
    '''simple docstring'''
    return sum(int(UpperCamelCase__ ) for x in str(factorial(UpperCamelCase__ ) ) )

if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
code_codestyle: 354
style_context:
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel

@require_tf
class __SCREAMING_SNAKE_CASE :
    snake_case_ = PegasusConfig
    snake_case_ = {}
    snake_case_ = """gelu"""

    def __init__( self : int , __lowercase : Optional[Any] , __lowercase : int=13 , __lowercase : List[str]=7 , __lowercase : Dict=True , __lowercase : Tuple=False , __lowercase : Optional[Any]=99 , __lowercase : str=32 , __lowercase : List[str]=2 , __lowercase : str=4 , __lowercase : Optional[int]=37 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=40 , __lowercase : str=2 , __lowercase : List[Any]=1 , __lowercase : Optional[Any]=0 , ) -> Tuple:
        SCREAMING_SNAKE_CASE__ : Optional[Any] =parent
        SCREAMING_SNAKE_CASE__ : List[Any] =batch_size
        SCREAMING_SNAKE_CASE__ : Optional[int] =seq_length
        SCREAMING_SNAKE_CASE__ : Optional[Any] =is_training
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_labels
        SCREAMING_SNAKE_CASE__ : str =vocab_size
        SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
        SCREAMING_SNAKE_CASE__ : List[str] =num_hidden_layers
        SCREAMING_SNAKE_CASE__ : int =num_attention_heads
        SCREAMING_SNAKE_CASE__ : List[str] =intermediate_size
        SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : List[Any] =attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : List[Any] =max_position_embeddings
        SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
        SCREAMING_SNAKE_CASE__ : Any =pad_token_id
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =bos_token_id

    def __magic_name__ ( self : Any ) -> str:
        SCREAMING_SNAKE_CASE__ : List[str] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        SCREAMING_SNAKE_CASE__ : Any =tf.concat([input_ids, eos_tensor] , axis=1 )
        SCREAMING_SNAKE_CASE__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : Optional[int] =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_pegasus_inputs_dict(__lowercase , __lowercase , __lowercase )
        return config, inputs_dict

    def __magic_name__ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Optional[int] ) -> Tuple:
        SCREAMING_SNAKE_CASE__ : List[Any] =TFPegasusModel(config=__lowercase ).get_decoder()
        SCREAMING_SNAKE_CASE__ : List[str] =inputs_dict['''input_ids''']
        SCREAMING_SNAKE_CASE__ : Tuple =input_ids[:1, :]
        SCREAMING_SNAKE_CASE__ : Tuple =inputs_dict['''attention_mask'''][:1, :]
        SCREAMING_SNAKE_CASE__ : Tuple =inputs_dict['''head_mask''']
        SCREAMING_SNAKE_CASE__ : List[str] =1
        # first forward pass
        SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , attention_mask=__lowercase , head_mask=__lowercase , use_cache=__lowercase )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE__ : str =ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE__ : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 )
        SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        SCREAMING_SNAKE_CASE__ : int =model(__lowercase , attention_mask=__lowercase )[0]
        SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        SCREAMING_SNAKE_CASE__ : Optional[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        SCREAMING_SNAKE_CASE__ : Any =output_from_no_past[:, -3:, random_slice_idx]
        SCREAMING_SNAKE_CASE__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 )

def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Optional[Any]=None, ):
    '''simple docstring'''
    if attention_mask is None:
        SCREAMING_SNAKE_CASE__ : str =tf.cast(tf.math.not_equal(UpperCamelCase__, config.pad_token_id ), tf.inta )
    if decoder_attention_mask is None:
        SCREAMING_SNAKE_CASE__ : Any =tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
            ],
            axis=-1,
        )
    if head_mask is None:
        SCREAMING_SNAKE_CASE__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        SCREAMING_SNAKE_CASE__ : List[Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        SCREAMING_SNAKE_CASE__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }

@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    snake_case_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    snake_case_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    snake_case_ = (
        {
            """conversational""": TFPegasusForConditionalGeneration,
            """feature-extraction""": TFPegasusModel,
            """summarization""": TFPegasusForConditionalGeneration,
            """text2text-generation""": TFPegasusForConditionalGeneration,
            """translation""": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    snake_case_ = True
    snake_case_ = False
    snake_case_ = False

    def __magic_name__ ( self : Union[str, Any] ) -> str:
        SCREAMING_SNAKE_CASE__ : List[Any] =TFPegasusModelTester(self )
        SCREAMING_SNAKE_CASE__ : Dict =ConfigTester(self , config_class=__lowercase )

    def __magic_name__ ( self : int ) -> Any:
        self.config_tester.run_common_tests()

    def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
        SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )

@require_sentencepiece
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    snake_case_ = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]
    snake_case_ = [
        """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
        """ reduce the risk of wildfires.""",
        """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    snake_case_ = """google/pegasus-xsum"""

    @cached_property
    def __magic_name__ ( self : Optional[int] ) -> Tuple:
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE__ : Optional[int] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def __magic_name__ ( self : List[str] , **__lowercase : Any ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.translate_src_text(**__lowercase )
        assert self.expected_text == generated_words

    def __magic_name__ ( self : Optional[Any] , **__lowercase : List[str] ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer(self.src_text , **__lowercase , padding=__lowercase , return_tensors='''tf''' )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowercase , )
        SCREAMING_SNAKE_CASE__ : Any =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowercase )
        return generated_words

    @slow
    def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
        self._assert_generated_batch_equal_expected()
style_context_codestyle: 222
label: 0
Row 7
code:
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )

class snake_case :
    '''simple docstring'''

    def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=13 , lowerCAmelCase : str=7 , lowerCAmelCase : Dict=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : Tuple=99 , lowerCAmelCase : Dict=64 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : Dict=4 , lowerCAmelCase : int=37 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : List[Any]=512 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : str=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : int=4 , lowerCAmelCase : List[Any]=None , ) -> Dict:
        """simple docstring"""
        _snake_case : Optional[int] = parent
        _snake_case : Dict = batch_size
        _snake_case : Any = seq_length
        _snake_case : Union[str, Any] = is_training
        _snake_case : Union[str, Any] = use_input_mask
        _snake_case : str = use_token_type_ids
        _snake_case : Dict = use_labels
        _snake_case : Any = vocab_size
        _snake_case : Tuple = hidden_size
        _snake_case : Dict = num_hidden_layers
        _snake_case : Union[str, Any] = num_attention_heads
        _snake_case : Optional[Any] = intermediate_size
        _snake_case : Union[str, Any] = hidden_act
        _snake_case : int = hidden_dropout_prob
        _snake_case : Optional[int] = attention_probs_dropout_prob
        _snake_case : int = max_position_embeddings
        _snake_case : List[str] = type_vocab_size
        _snake_case : Optional[int] = type_sequence_label_size
        _snake_case : List[Any] = initializer_range
        _snake_case : int = num_labels
        _snake_case : Tuple = num_choices
        _snake_case : List[Any] = scope
        _snake_case : str = vocab_size - 1

    def UpperCamelCase_ ( self : Optional[Any]) -> Optional[Any]:
        """simple docstring"""
        _snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _snake_case : Dict = None
        if self.use_input_mask:
            _snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
        _snake_case : List[Any] = None
        if self.use_labels:
            _snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
        _snake_case : List[Any] = self.get_config()
        return config, input_ids, input_mask, token_labels

    def UpperCamelCase_ ( self : Any) -> int:
        """simple docstring"""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )

    def UpperCamelCase_ ( self : Tuple) -> Dict:
        """simple docstring"""
        _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = self.prepare_config_and_inputs()
        _snake_case : Optional[Any] = True
        return config, input_ids, input_mask, token_labels

    def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : int) -> Optional[int]:
        """simple docstring"""
        _snake_case : Union[str, Any] = GPTNeoXModel(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        _snake_case : Optional[Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
        _snake_case : Dict = model(lowerCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : int) -> Dict:
        """simple docstring"""
        _snake_case : Tuple = True
        _snake_case : Union[str, Any] = GPTNeoXModel(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        _snake_case : Union[str, Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCamelCase_ ( self : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : str) -> int:
        """simple docstring"""
        _snake_case : Optional[Any] = GPTNeoXForCausalLM(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        _snake_case : int = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[str]) -> str:
        """simple docstring"""
        _snake_case : Optional[Any] = self.num_labels
        _snake_case : str = GPTNeoXForQuestionAnswering(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        _snake_case : Tuple = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def UpperCamelCase_ ( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Any) -> Any:
        """simple docstring"""
        _snake_case : Any = self.num_labels
        _snake_case : Tuple = GPTNeoXForSequenceClassification(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        _snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        _snake_case : Union[str, Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str) -> Any:
        """simple docstring"""
        _snake_case : Any = self.num_labels
        _snake_case : str = GPTNeoXForTokenClassification(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        _snake_case : Dict = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any]) -> Dict:
        """simple docstring"""
        _snake_case : Optional[Any] = True
        _snake_case : List[Any] = GPTNeoXForCausalLM(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        # first forward pass
        _snake_case : Optional[int] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase)
        _snake_case : List[str] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        _snake_case : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
        _snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        _snake_case : Any = torch.cat([input_ids, next_tokens] , dim=-1)
        _snake_case : int = torch.cat([input_mask, next_mask] , dim=-1)
        _snake_case : Dict = model(lowerCAmelCase , attention_mask=lowerCAmelCase , output_hidden_states=lowerCAmelCase)
        _snake_case : List[str] = output_from_no_past["""hidden_states"""][0]
        _snake_case : List[Any] = model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )["""hidden_states"""][0]
        # select random slice
        _snake_case : Any = ids_tensor((1,) , output_from_past.shape[-1]).item()
        _snake_case : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        _snake_case : str = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3))

    def UpperCamelCase_ ( self : int) -> List[Any]:
        """simple docstring"""
        _snake_case : int = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
        _snake_case : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict

@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
    '''simple docstring'''

    snake_case_ : str = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    snake_case_ : Dict = (
        {
            """feature-extraction""": GPTNeoXModel,
            """question-answering""": GPTNeoXForQuestionAnswering,
            """text-classification""": GPTNeoXForSequenceClassification,
            """text-generation""": GPTNeoXForCausalLM,
            """token-classification""": GPTNeoXForTokenClassification,
            """zero-shot""": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case_ : int = False
    snake_case_ : List[Any] = False
    snake_case_ : Any = False
    snake_case_ : str = False

    def UpperCamelCase_ ( self : Union[str, Any]) -> List[Any]:
        """simple docstring"""
        _snake_case : Union[str, Any] = GPTNeoXModelTester(self)
        _snake_case : List[Any] = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=64 , num_attention_heads=8)

    def UpperCamelCase_ ( self : List[str]) -> Any:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def UpperCamelCase_ ( self : int) -> str:
        """simple docstring"""
        _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)

    def UpperCamelCase_ ( self : int) -> List[str]:
        """simple docstring"""
        _snake_case , _snake_case , _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)

    def UpperCamelCase_ ( self : Optional[Any]) -> str:
        """simple docstring"""
        _snake_case , _snake_case , _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_decoder()
        _snake_case : Dict = None
        self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)

    def UpperCamelCase_ ( self : Optional[int]) -> Optional[int]:
        """simple docstring"""
        _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)

    def UpperCamelCase_ ( self : Tuple) -> Dict:
        """simple docstring"""
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase)

    def UpperCamelCase_ ( self : Dict) -> str:
        """simple docstring"""
        _snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)

    def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
        """simple docstring"""
        _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)

    def UpperCamelCase_ ( self : Optional[Any]) -> int:
        """simple docstring"""
        _snake_case : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)

    @unittest.skip(reason="""Feed forward chunking is not implemented""")
    def UpperCamelCase_ ( self : Any) -> List[str]:
        """simple docstring"""
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def UpperCamelCase_ ( self : int , lowerCAmelCase : List[Any]) -> Any:
        """simple docstring"""
        _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Tuple = ids_tensor([1, 10] , config.vocab_size)
        _snake_case : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        _snake_case : Tuple = GPTNeoXModel(lowerCAmelCase)
        original_model.to(lowerCAmelCase)
        original_model.eval()
        _snake_case : Union[str, Any] = original_model(lowerCAmelCase).last_hidden_state
        _snake_case : Union[str, Any] = original_model(lowerCAmelCase).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        _snake_case : str = {"""type""": scaling_type, """factor""": 10.0}
        _snake_case : Optional[Any] = GPTNeoXModel(lowerCAmelCase)
        scaled_model.to(lowerCAmelCase)
        scaled_model.eval()
        _snake_case : Any = scaled_model(lowerCAmelCase).last_hidden_state
        _snake_case : List[Any] = scaled_model(lowerCAmelCase).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
        else:
            self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))

@require_torch
class snake_case ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def UpperCamelCase_ ( self : Optional[int]) -> Any:
        """simple docstring"""
        _snake_case : int = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""")
        for checkpointing in [True, False]:
            _snake_case : Optional[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(lowerCAmelCase)
            _snake_case : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""").to(lowerCAmelCase)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            _snake_case : Union[str, Any] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            _snake_case : Optional[Any] = model.generate(**lowerCAmelCase , do_sample=lowerCAmelCase , max_new_tokens=20)
            _snake_case : List[Any] = tokenizer.batch_decode(lowerCAmelCase)[0]
            self.assertEqual(lowerCAmelCase , lowerCAmelCase)
code_codestyle: 317
style_context:
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor

a__ = logging.get_logger(__name__)

class snake_case ( SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''

    def __init__( self : List[Any] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Dict) -> None:
        """simple docstring"""
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" , lowerCAmelCase , )
        super().__init__(*lowerCAmelCase , **lowerCAmelCase)
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
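Outside the test harness, the same pipeline can be driven in one call; a sketch using the checkpoint referenced by the integration test (downloads the model from the Hub):

# Sketch: unconditional 256x256 sampling with the score-SDE VE pipeline.
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=10).images[0]  # one unconditional sample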
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
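The intended call pattern for this scheduler, as a sketch (`score_fn` is a hypothetical score model standing in for whatever network estimates the score at time t):

# Sketch: reverse-time VP-SDE sampling loop driven by step_pred.
import torch

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=1000)
x = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    score = score_fn(x, t)  # hypothetical: model's score estimate at time t
    x, x_mean = scheduler.step_pred(score, x, t)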
"""EfficientFormer model configuration"""

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
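A quick sketch of instantiating the config, using only the fields defined above (the smaller variant is made up for illustration):

# Sketch: defaults, and an overridden variant.
config = EfficientFormerConfig()
print(config.depths, config.hidden_sizes)  # [3, 2, 6, 4] [48, 96, 224, 448]
small = EfficientFormerConfig(depths=[2, 2, 4, 2], hidden_sizes=[32, 64, 128, 256])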
"""TensorFlow RegNet model."""

from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's layer composed of three 3x3 convolutions, same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
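A hedged usage sketch for the classification head above, using the checkpoint named in this file's docstring constants (`image` is any PIL image you supply):

# Sketch: classify an image with the pretrained RegNet-Y 040 checkpoint.
import tensorflow as tf
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
predicted = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])  # e.g. "tabby, tabby cat"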
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
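The `_LazyModule` indirection above keeps package import cheap: submodules load only when an attribute is first accessed. A short sketch:

# Sketch: importing the config class triggers only `configuration_m2m_100`, not the torch-heavy modeling file.
from transformers import M2M100Config

config = M2M100Config()
print(config.model_type)  # "m2m_100"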
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones, keeping the same API as the other models in the library.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
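A sketch of driving the wrapper directly from a config (timm must be installed; the backbone name and tensor shape are illustrative):

# Sketch: wrap a timm model as a transformers backbone and inspect its feature maps.
import torch
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
backbone = TimmBackbone(config)
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
print([f.shape for f in outputs.feature_maps])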
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
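How the checker above is typically invoked, per the comment at the top of the file:

# From the repo root:
#   python utils/check_task_guides.py                      # raise if any guide's model list is stale
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the generated tips in place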
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps)  # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
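A typical invocation of the IGF script above, as a sketch (the script filename and data paths are illustrative; the data/igf files match the defaults wired into main()):

# From the directory containing the script and its `data/` folder:
#   python run_clm_igf.py --data_dir data --model_name_or_path gpt2 --output_dir igf_output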
"""
Processor class for LayoutXLM.
"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
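A hedged end-to-end sketch for the processor above (`image` is a PIL document image you supply; built-in OCR requires pytesseract):

# Sketch: OCR the document, tokenize the recognized words, and return model-ready tensors.
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image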
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
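The command above is normally reached through the accelerate CLI rather than run directly:

# Equivalent CLI usage once accelerate is installed:
#   accelerate test
#   accelerate test --config_file path/to/config.yaml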
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data. Property names are the same names as the corresponding inputs to a model."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
use the cache. _a : List[str] =cached_features_file + """.lock""" with FileLock(SCREAMING_SNAKE_CASE ): if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}" ) _a : Any =torch.load(SCREAMING_SNAKE_CASE ) else: logger.info(f"Creating features from dataset file at {data_dir}" ) _a : Any =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # TODO clean up all this to leverage built-in features of tokenizers _a : List[str] =token_classification_task.convert_examples_to_features( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f"Saving features into cached file {cached_features_file}" ) torch.save(self.features , SCREAMING_SNAKE_CASE ) def __len__( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' return len(self.features ) def __getitem__( self :Dict , SCREAMING_SNAKE_CASE :int ) -> InputFeatures: '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class A__ : __UpperCamelCase : List[InputFeatures] __UpperCamelCase : int = -100 def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any: '''simple docstring''' _a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # TODO clean up all this to leverage built-in features of tokenizers _a : List[Any] =token_classification_task.convert_examples_to_features( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: _a : Union[str, Any] =tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , ( {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: _a : Union[str, Any] =tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , ( { """input_ids""": 
tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] ), """token_type_ids""": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def __UpperCAmelCase ( self :Tuple ) -> Any: '''simple docstring''' _a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self :str ) -> Optional[int]: '''simple docstring''' return len(self.features ) def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures: '''simple docstring''' return self.features[i]
276
1
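The conversion loop in the style_context above implements the usual first-subtoken labeling scheme for token classification: when a word splits into several wordpieces, only the first piece keeps the real label id and the remaining pieces get `pad_token_label_id` (the loss ignore-index) so they do not contribute to training. That step in isolation, with a toy stand-in for the tokenizer and label map:

```python
def align_labels(words, labels, tokenize, label_map, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        pieces = tokenize(word)
        if not pieces:  # some tokenizers return [] for bare whitespace
            continue
        tokens.extend(pieces)
        # real label on the first piece, ignore-index on the remainder
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))
    return tokens, label_ids

# toy tokenizer that splits words into 3-character pieces
toy_tokenize = lambda w: [w[i:i + 3] for i in range(0, len(w), 3)]
print(align_labels(["Johanna", "runs"], ["B-PER", "O"], toy_tokenize, {"B-PER": 1, "O": 0}))
# (['Joh', 'ann', 'a', 'run', 's'], [1, -100, -100, 0, -100])
```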
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str]=1_3 , snake_case : Union[str, Any]=3_0 , snake_case : int=2 , snake_case : List[str]=3 , snake_case : int=True , snake_case : Optional[Any]=True , snake_case : Optional[Any]=3_2 , snake_case : Optional[Any]=5 , snake_case : List[Any]=4 , snake_case : List[str]=3_7 , snake_case : Optional[Any]="gelu" , snake_case : Optional[int]=0.1 , snake_case : Optional[Any]=0.1 , snake_case : Union[str, Any]=1_0 , snake_case : Dict=0.02 , snake_case : Dict=None , ) -> Tuple: """simple docstring""" UpperCamelCase_ : Optional[int] = parent UpperCamelCase_ : str = batch_size UpperCamelCase_ : Tuple = image_size UpperCamelCase_ : List[Any] = patch_size UpperCamelCase_ : Tuple = num_channels UpperCamelCase_ : str = is_training UpperCamelCase_ : Optional[int] = use_labels UpperCamelCase_ : Optional[Any] = hidden_size UpperCamelCase_ : List[Any] = num_hidden_layers UpperCamelCase_ : Tuple = num_attention_heads UpperCamelCase_ : List[Any] = intermediate_size UpperCamelCase_ : Optional[Any] = hidden_act UpperCamelCase_ : Union[str, Any] = hidden_dropout_prob UpperCamelCase_ : Dict = attention_probs_dropout_prob UpperCamelCase_ : List[str] = type_sequence_label_size UpperCamelCase_ : Optional[Any] = initializer_range UpperCamelCase_ : List[str] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase_ : int = (image_size // patch_size) ** 2 UpperCamelCase_ : Union[str, Any] = num_patches + 1 def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: """simple docstring""" UpperCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_ : Optional[int] = None if self.use_labels: UpperCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase_ : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : Any ) -> List[Any]: """simple docstring""" UpperCamelCase_ : List[str] = ViTMSNModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCamelCase_ : Tuple = 
model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ : str = self.type_sequence_label_size UpperCamelCase_ : Union[str, Any] = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() UpperCamelCase_ : List[str] = model(snake_case , labels=snake_case ) print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' ) print('Labels: {labels}' ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase_ : Any = 1 UpperCamelCase_ : Union[str, Any] = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() UpperCamelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase_ : Union[str, Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: """simple docstring""" UpperCamelCase_ : int = self.prepare_config_and_inputs() UpperCamelCase_ : List[Any] = config_and_inputs UpperCamelCase_ : List[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ): lowercase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () lowercase = ( {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ : List[Any] = ViTMSNModelTester(self ) UpperCamelCase_ : Tuple = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMSN does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: """simple docstring""" UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ : int = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase_ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: """simple docstring""" UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ : Optional[int] = model_class(snake_case ) UpperCamelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ : Union[str, Any] = [*signature.parameters.keys()] UpperCamelCase_ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: """simple docstring""" UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: """simple docstring""" UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any: """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ : str = ViTMSNModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __lowercase ( ): UpperCamelCase_ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: """simple docstring""" torch.manual_seed(2 ) UpperCamelCase_ : Optional[int] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(snake_case ) UpperCamelCase_ : Dict = self.default_image_processor UpperCamelCase_ : Union[str, Any] = prepare_img() UpperCamelCase_ : int = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): UpperCamelCase_ : Dict = model(**snake_case ) # verify the logits UpperCamelCase_ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCamelCase_ : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
368
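The model tester above derives the expected sequence length from the patch grid: a square image of side `image_size` cut into non-overlapping patches of side `patch_size` yields `(image_size // patch_size) ** 2` patches, plus one position for the `[CLS]` token. With the defaults used in that tester:

```python
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 1                   # +1 for the [CLS] token -> 226
print(num_patches, seq_length)
```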
def __lowercase ( lowerCamelCase : str ):
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    UpperCamelCase_ : Optional[int] = sorted(string.lower() )
    return len(lowerCamelCase ) == len(set(lowerCamelCase ) )


if __name__ == "__main__":
    a_ = input('Enter a string ').strip()
    a_ = is_isogram(input_str)
    print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
50
0
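Assuming the checker two rows above is exposed as `is_isogram` (as its `__main__` block suggests), it is case-insensitive because the string is lowercased before deduplication:

```python
assert is_isogram("Uncopyrightable")  # 15 distinct letters, a classic isogram
assert not is_isogram("isograms")     # 's' appears twice
assert not is_isogram("Alphabet")     # 'A' and 'a' count as the same letter
```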
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( SCREAMING_SNAKE_CASE__ ): def __init__( self ,A__ ,A__ ,A__ = None ,A__ = None ,A__ = False ,**A__ ,): super().__init__(features=A__ ,cache_dir=A__ ,keep_in_memory=A__ ,**A__) lowercase = Sql( cache_dir=A__ ,features=A__ ,sql=A__ ,con=A__ ,**A__ ,) def A__ ( self): lowercase = None lowercase = None lowercase = None lowercase = None self.builder.download_and_prepare( download_config=A__ ,download_mode=A__ ,verification_mode=A__ ,base_path=A__ ,) # Build dataset for splits lowercase = self.builder.as_dataset( split='''train''' ,verification_mode=A__ ,in_memory=self.keep_in_memory) return dataset class lowercase : def __init__( self ,A__ ,A__ ,A__ ,A__ = None ,A__ = None ,**A__ ,): if num_proc is not None and num_proc <= 0: raise ValueError(f'num_proc {num_proc} must be an integer > 0.') lowercase = dataset lowercase = name lowercase = con lowercase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE lowercase = num_proc lowercase = to_sql_kwargs def A__ ( self): lowercase = self.to_sql_kwargs.pop('''sql''' ,A__) lowercase = self.to_sql_kwargs.pop('''con''' ,A__) lowercase = self.to_sql_kwargs.pop('''index''' ,A__) lowercase = self._write(index=A__ ,**self.to_sql_kwargs) return written def A__ ( self ,A__): lowercase , lowercase , lowercase = args lowercase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs lowercase = query_table( table=self.dataset.data ,key=slice(A__ ,offset + self.batch_size) ,indices=self.dataset._indices ,) lowercase = batch.to_pandas() lowercase = df.to_sql(self.name ,self.con ,index=A__ ,**A__) return num_rows or len(A__) def A__ ( self ,A__ ,**A__): lowercase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset) ,self.batch_size) ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating SQL from Arrow format''' ,): written += self._batch_sql((offset, index, to_sql_kwargs)) else: lowercase , lowercase = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,A__ ,A__)] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating SQL from Arrow format''' ,): written += num_rows return written
101
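The SQL writer above chunks the dataset into `batch_size` slices by offset and hands each `(offset, index, to_sql_kwargs)` tuple to `_batch_sql`, switching pandas' `DataFrame.to_sql` to `if_exists="append"` for every chunk after the first. The same batching logic in isolation, with plain pandas and an in-memory sqlite3 database instead of the datasets machinery:

```python
import sqlite3

import pandas as pd

df = pd.DataFrame({"id": range(10), "value": list("abcdefghij")})
con = sqlite3.connect(":memory:")
batch_size = 4

for offset in range(0, len(df), batch_size):
    batch = df.iloc[offset:offset + batch_size]
    # the first batch creates/replaces the table, later batches append to it
    if_exists = "append" if offset > 0 else "replace"
    batch.to_sql("data", con, index=False, if_exists=if_exists)

print(pd.read_sql("SELECT COUNT(*) AS n FROM data", con))  # n == 10
```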
from functools import lru_cache


@lru_cache
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    if num < 0:
        raise ValueError('''Number should not be negative.''' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
1
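The function above appears to be the memoized recursive factorial; `functools.lru_cache` caches each intermediate value, so a later call reuses earlier results instead of recursing again. Written out in runnable form, with `cache_info()` making the effect visible:

```python
from functools import lru_cache

@lru_cache
def factorial(num):
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)

factorial(10)                  # computes and caches 1! .. 10!
factorial(12)                  # only computes 11! and 12!; 10! is a cache hit
print(factorial.cache_info())  # CacheInfo(hits=1, misses=12, maxsize=128, currsize=12)
```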
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = 1 @register_to_config def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int = 1_0_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Union[np.ndarray, List[float]]] = None): # set `betas`, `alphas`, `timesteps` self.set_timesteps(SCREAMING_SNAKE_CASE__) # standard deviation of the initial noise distribution __lowerCamelCase : int = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __lowerCamelCase : int = 4 # running values __lowerCamelCase : Tuple = [] def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None): __lowerCamelCase : List[str] = num_inference_steps __lowerCamelCase : int = torch.linspace(1 ,0 ,num_inference_steps + 1)[:-1] __lowerCamelCase : Dict = torch.cat([steps, torch.tensor([0.0])]) if self.config.trained_betas is not None: __lowerCamelCase : Any = torch.tensor(self.config.trained_betas ,dtype=torch.floataa) else: __lowerCamelCase : Dict = torch.sin(steps * math.pi / 2) ** 2 __lowerCamelCase : Optional[int] = (1.0 - self.betas**2) ** 0.5 __lowerCamelCase : Union[str, Any] = (torch.atana(self.betas ,self.alphas) / math.pi * 2)[:-1] __lowerCamelCase : Tuple = timesteps.to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = [] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : bool = True ,): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler') __lowerCamelCase : Tuple = (self.timesteps == timestep).nonzero().item() __lowerCamelCase : int = timestep_index + 1 __lowerCamelCase : str = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(SCREAMING_SNAKE_CASE__) if len(self.ets) == 1: __lowerCamelCase : List[Any] = self.ets[-1] elif len(self.ets) == 2: __lowerCamelCase : str = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: __lowerCamelCase : Dict = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2 else: __lowerCamelCase : Optional[Any] = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4]) __lowerCamelCase : Tuple = self._get_prev_sample(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,*SCREAMING_SNAKE_CASE__ : Any ,**SCREAMING_SNAKE_CASE__ : List[str]): return sample def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Any = self.alphas[timestep_index] __lowerCamelCase : Dict = self.betas[timestep_index] __lowerCamelCase : Optional[int] = self.alphas[prev_timestep_index] __lowerCamelCase : str = 
self.betas[prev_timestep_index] __lowerCamelCase : Any = (sample - sigma * ets) / max(SCREAMING_SNAKE_CASE__ ,1E-8) __lowerCamelCase : Tuple = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : Union[str, Any]): return self.config.num_train_timesteps
113
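The branching in the scheduler's `step` above is the Adams-Bashforth family of multistep rules: with one stored derivative it is plain Euler, and with two, three, or four it uses the classic AB2/AB3/AB4 coefficients (1/2·[3, -1], 1/12·[23, -16, 5], 1/24·[55, -59, 37, -9]). A quick sanity check that each coefficient row sums to 1, as consistency of a linear multistep method requires:

```python
coeff_rows = {
    1: [1.0],
    2: [3 / 2, -1 / 2],
    3: [23 / 12, -16 / 12, 5 / 12],
    4: [55 / 24, -59 / 24, 37 / 24, -9 / 24],
}
for order, coeffs in coeff_rows.items():
    # a consistent multistep rule must have weights summing to exactly 1
    assert abs(sum(coeffs) - 1.0) < 1e-12, order
print("all Adams-Bashforth coefficient rows are consistent")
```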
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

a ={
    """configuration_bigbird_pegasus""": [
        """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BigBirdPegasusConfig""",
        """BigBirdPegasusOnnxConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a =[
        """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BigBirdPegasusForCausalLM""",
        """BigBirdPegasusForConditionalGeneration""",
        """BigBirdPegasusForQuestionAnswering""",
        """BigBirdPegasusForSequenceClassification""",
        """BigBirdPegasusModel""",
        """BigBirdPegasusPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
113
1
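The `_LazyModule` rows here defer the heavy `modeling_*` imports until an attribute is first touched. The underlying mechanism is a module-level `__getattr__` (PEP 562); a toy sketch of the idea, not transformers' actual `_LazyModule` implementation:

```python
# lazy_api.py - a minimal lazy-import module (PEP 562), for illustration only
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names
_attr_to_module = {a: m for m, attrs in _import_structure.items() for a in attrs}

def __getattr__(name):
    # Called only when `name` is not found in this module's globals,
    # so the real import is deferred until first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# elsewhere: `import lazy_api; lazy_api.dumps({...})` triggers the json import
```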
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness _lowerCAmelCase = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' _lowerCAmelCase = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' _lowerCAmelCase = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' _lowerCAmelCase = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' _lowerCAmelCase = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): '''simple docstring''' def a_ (self ) -> Tuple: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=[1, 1_0, 1_0_0] , _UpperCAmelCase=4 , _UpperCAmelCase=3.0 ) -> Optional[Any]: if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows." 
) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: __UpperCamelCase : int = [] __UpperCamelCase : Union[str, Any] = Counter() __UpperCamelCase : Optional[int] = 0 __UpperCamelCase : str = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: __UpperCamelCase : Union[str, Any] = candidate + "\n" + test_case __UpperCamelCase : Dict = (test_program, timeout, task_id, completion_id[task_id]) __UpperCamelCase : int = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): __UpperCamelCase : Any = future.result() results[result["task_id"]].append((result["completion_id"], result) ) __UpperCamelCase , __UpperCamelCase : str = [], [] for result in results.values(): result.sort() __UpperCamelCase : int = [r[1]["passed"] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) __UpperCamelCase : Optional[int] = np.array(_UpperCAmelCase ) __UpperCamelCase : int = np.array(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = k __UpperCamelCase : str = {f"pass@{k}": estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): def estimator(snake_case__ , snake_case__ , snake_case__ ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(snake_case__ , snake_case__ ): __UpperCamelCase : Tuple = itertools.repeat(snake_case__ , len(snake_case__ ) ) else: assert len(snake_case__ ) == len(snake_case__ ) __UpperCamelCase : Optional[int] = iter(snake_case__ ) return np.array([estimator(int(snake_case__ ) , int(snake_case__ ) , snake_case__ ) for n, c in zip(snake_case__ , snake_case__ )] )
298
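The `estimator` at the bottom of the metric above is the unbiased pass@k from the Codex paper: for n samples with c correct, pass@k = 1 - C(n-c, k)/C(n, k), computed in a numerically stable way as 1 - ∏_{i=n-c+1}^{n} (1 - k/i). A direct check of the product form against the combinatorial definition:

```python
from math import comb

import numpy as np

def pass_at_k(n, c, k):
    # unbiased estimator: probability that at least one of k draws is correct
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

n, c, k = 20, 3, 5
exact = 1.0 - comb(n - c, k) / comb(n, k)
assert abs(pass_at_k(n, c, k) - exact) < 1e-12
print(exact)
```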
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = ["image_processor", "tokenizer"] A = "OwlViTImageProcessor" A = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str: __UpperCamelCase : Tuple = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _UpperCAmelCase , ) __UpperCamelCase : str = kwargs.pop("feature_extractor" ) __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )): __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )] elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ): __UpperCamelCase : List[str] = [] # Maximum number of queries across batch __UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_UpperCAmelCase ) != max_num_queries: __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase )) __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) encodings.append(_UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , 
axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) __UpperCamelCase : Optional[Any] = BatchEncoding() __UpperCamelCase : Union[str, Any] = input_ids __UpperCamelCase : List[str] = attention_mask if query_images is not None: __UpperCamelCase : str = BatchEncoding() __UpperCamelCase : Any = self.image_processor( _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values __UpperCamelCase : List[Any] = query_pixel_values if images is not None: __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None and images is not None: __UpperCamelCase : Optional[Any] = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase : Union[str, Any] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]: return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]: return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def a_ (self ) -> Tuple: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , ) return self.image_processor_class @property def a_ (self ) -> Union[str, Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , ) return self.image_processor
298
1
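For nested text input, the OwlViT processor above pads every sample's query list to the batch-wide maximum (with a single space as the pad query) before tokenizing, so each sample yields the same number of query encodings. That padding step in isolation:

```python
text = [["cat", "remote control"], ["dog"]]
max_num_queries = max(len(t) for t in text)  # 2
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)  # [['cat', 'remote control'], ['dog', ' ']]
```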
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCamelCase_ (__A ): __magic_name__ = ['''vqvae'''] def __init__( self : int , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Mel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ) -> Tuple: super().__init__() self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , mel=lowerCAmelCase_ , vqvae=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: return 50 if isinstance(self.scheduler , lowerCAmelCase_ ) else 1_000 @torch.no_grad() def __call__( self : Any , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = None , lowerCAmelCase_ : np.ndarray = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = None , lowerCAmelCase_ : torch.Generator = None , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : torch.Generator = None , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : torch.Tensor = None , lowerCAmelCase_ : torch.Tensor = None , lowerCAmelCase_ : List[Any]=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: UpperCAmelCase_ : str = steps or self.get_default_steps() self.scheduler.set_timesteps(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase_ : int = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase_ : Any = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowerCAmelCase_ , device=self.device , ) UpperCAmelCase_ : Tuple = noise UpperCAmelCase_ : Optional[Any] = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : int = self.mel.audio_slice_to_image(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase_ : Dict = (input_image / 255) * 2 - 1 UpperCAmelCase_ : List[str] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase_ , 0 ) ).latent_dist.sample( generator=lowerCAmelCase_ )[0] UpperCAmelCase_ : List[str] = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase_ : str = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase_ : List[str] = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase_ : List[Any] = int(mask_start_secs * pixels_per_second ) UpperCAmelCase_ : Any = int(mask_end_secs * pixels_per_second ) UpperCAmelCase_ : int = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] 
) ): if isinstance(self.unet , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )["sample"] else: UpperCAmelCase_ : Optional[int] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ )["sample"] if isinstance(self.scheduler , lowerCAmelCase_ ): UpperCAmelCase_ : str = self.scheduler.step( model_output=lowerCAmelCase_ , timestep=lowerCAmelCase_ , sample=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , )["prev_sample"] else: UpperCAmelCase_ : List[str] = self.scheduler.step( model_output=lowerCAmelCase_ , timestep=lowerCAmelCase_ , sample=lowerCAmelCase_ , generator=lowerCAmelCase_ , )["prev_sample"] if mask is not None: if mask_start > 0: UpperCAmelCase_ : Any = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase_ : Union[str, Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase_ : List[Any] = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase_ : Optional[Any] = self.vqvae.decode(lowerCAmelCase_ )["sample"] UpperCAmelCase_ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase_ : List[Any] = (images * 255).round().astype("uint8" ) UpperCAmelCase_ : int = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowerCAmelCase_ , mode="RGB" ).convert("L" ) for _ in images) ) UpperCAmelCase_ : Any = [self.mel.image_to_audio(lowerCAmelCase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCAmelCase_ ) ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Image.Image] , lowerCAmelCase_ : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , lowerCAmelCase_ ) self.scheduler.set_timesteps(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase_ : Dict = (sample / 255) * 2 - 1 UpperCAmelCase_ : Any = torch.Tensor(lowerCAmelCase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase_ : Dict = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase_ : int = self.scheduler.alphas_cumprod[t] UpperCAmelCase_ : Optional[Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase_ : str = 1 - alpha_prod_t UpperCAmelCase_ : Dict = self.unet(lowerCAmelCase_ , lowerCAmelCase_ )["sample"] UpperCAmelCase_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase_ : int = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase_ : Tuple = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : torch.Tensor , lowerCAmelCase_ : torch.Tensor , lowerCAmelCase_ : float ) -> torch.Tensor: UpperCAmelCase_ : Tuple = acos(torch.dot(torch.flatten(lowerCAmelCase_ ) , torch.flatten(lowerCAmelCase_ ) ) / torch.norm(lowerCAmelCase_ ) / torch.norm(lowerCAmelCase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowerCAmelCase_ ) + sin(alpha * theta ) * xa / sin(lowerCAmelCase_ )
253
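The static `slerp` at the end of the pipeline above is spherical linear interpolation between two latent tensors: θ = arccos(⟨x₀, x₁⟩ / (‖x₀‖ ‖x₁‖)), then sin((1-α)θ)/sin θ · x₀ + sin(αθ)/sin θ · x₁. A NumPy version of the same formula, checked at the endpoints:

```python
import numpy as np

def slerp(x0, x1, alpha):
    # angle between the two (flattened) tensors on the unit sphere
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel())
                      / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

rng = np.random.default_rng(0)
x0, x1 = rng.normal(size=8), rng.normal(size=8)
assert np.allclose(slerp(x0, x1, 0.0), x0)  # alpha=0 returns the first input
assert np.allclose(slerp(x0, x1, 1.0), x1)  # alpha=1 returns the second
```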
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCamelCase_ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
253
1
"""simple docstring""" import os def _snake_case ( ) -> Dict: with open(os.path.dirname(lowerCamelCase__ ) + "/p022_names.txt" ) as file: lowerCamelCase_ : str =str(file.readlines()[0] ) lowerCamelCase_ : Union[str, Any] =names.replace("\"" , "" ).split("," ) names.sort() lowerCamelCase_ : str =0 lowerCamelCase_ : Optional[int] =0 for i, name in enumerate(lowerCamelCase__ ): for letter in name: name_score += ord(lowerCamelCase__ ) - 64 total_score += (i + 1) * name_score lowerCamelCase_ : List[Any] =0 return total_score if __name__ == "__main__": print(solution())
144
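The scoring rule in the Project Euler 22 solution above: sort the names, give each uppercase letter its alphabetical value (`ord(ch) - 64`), and weight the name score by its 1-based position in the sorted list. The worked example from the problem statement:

```python
name = "COLIN"
name_score = sum(ord(ch) - 64 for ch in name)  # 3 + 15 + 12 + 9 + 14 = 53
position = 938                                  # COLIN's 1-based rank in the sorted list
print(position * name_score)                    # 49714, as quoted in Project Euler 22
```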
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) A__ : Optional[Any] = logging.getLogger(__name__) def _snake_case ( ) -> int: lowerCamelCase_ : Tuple =argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=lowerCamelCase__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=lowerCamelCase__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=lowerCamelCase__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=lowerCamelCase__ , default="data/dump" , help="The dump file prefix." ) lowerCamelCase_ : Tuple =parser.parse_args() logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" ) if args.tokenizer_type == "bert": lowerCamelCase_ : Tuple =BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ : Optional[Any] =tokenizer.special_tokens_map["cls_token"] # `[CLS]` lowerCamelCase_ : Any =tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCamelCase_ : str =RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ : List[Any] =tokenizer.special_tokens_map["cls_token"] # `<s>` lowerCamelCase_ : Any =tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": lowerCamelCase_ : Tuple =GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ : Dict =tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` lowerCamelCase_ : Any =tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"""Loading text from {args.file_path}""" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: lowerCamelCase_ : Optional[int] =fp.readlines() logger.info("Start encoding" ) logger.info(F"""{len(lowerCamelCase__ )} examples to process.""" ) lowerCamelCase_ : str =[] lowerCamelCase_ : Union[str, Any] =0 lowerCamelCase_ : List[str] =10_000 lowerCamelCase_ : int =time.time() for text in data: lowerCamelCase_ : List[str] =F"""{bos} {text.strip()} {sep}""" lowerCamelCase_ : str =tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) rslt.append(lowerCamelCase__ ) iter += 1 if iter % interval == 0: lowerCamelCase_ : List[Any] =time.time() logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" ) lowerCamelCase_ : Tuple =time.time() logger.info("Finished binarization" ) logger.info(F"""{len(lowerCamelCase__ )} examples processed.""" ) lowerCamelCase_ : Optional[Any] =F"""{args.dump_file}.{args.tokenizer_name}.pickle""" lowerCamelCase_ : Optional[int] =tokenizer.vocab_size if vocab_size < (1 << 16): lowerCamelCase_ : int =[np.uintaa(lowerCamelCase__ ) for d in rslt] else: lowerCamelCase_ : Tuple =[np.intaa(lowerCamelCase__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"""Dump to {dp_file}""" ) with open(lowerCamelCase__ , "wb" ) as handle: pickle.dump(rslt_ , lowerCamelCase__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
144
1
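The dump step in the binarization script above picks the narrowest integer dtype that can hold every token id: an unsigned 16-bit type when the vocabulary fits in 2^16 ids, otherwise 32-bit, which halves the pickle size for typical ~30k-token vocabularies. The same check in isolation:

```python
import numpy as np

def pack_ids(token_ids, vocab_size):
    # uint16 holds ids 0..65535; fall back to int32 for larger vocabularies
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return np.asarray(token_ids, dtype=dtype)

arr = pack_ids([101, 7592, 102], vocab_size=30_522)
print(arr.dtype, arr.nbytes)  # uint16 6
```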
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar SCREAMING_SNAKE_CASE_ : Optional[Any] = TypeVar('T') class a ( Generic[T] ): """simple docstring""" UpperCAmelCase = 4_2 # Cache store of keys UpperCAmelCase = 4_2 # References of the keys in cache UpperCAmelCase = 1_0 # Maximum capacity of cache def __init__( self: Tuple , UpperCamelCase: int ): """simple docstring""" A__ = deque() A__ = set() if not n: A__ = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: A__ = n def UpperCamelCase ( self: int , UpperCamelCase: T ): """simple docstring""" if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: A__ = self.dq_store.pop() self.key_reference.remove(UpperCamelCase ) else: self.dq_store.remove(UpperCamelCase ) self.dq_store.appendleft(UpperCamelCase ) self.key_reference.add(UpperCamelCase ) def UpperCamelCase ( self: str ): """simple docstring""" for k in self.dq_store: print(UpperCamelCase ) def __repr__( self: Optional[Any] ): """simple docstring""" return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE_ : LRUCache[str | int] = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
364
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class a : """simple docstring""" def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any]=13 , UpperCamelCase: str=10 , UpperCamelCase: Dict=3 , UpperCamelCase: Any=2 , UpperCamelCase: str=2 , UpperCamelCase: Any=2 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Any=True , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[int]=5 , UpperCamelCase: Tuple=4 , UpperCamelCase: Optional[int]=37 , UpperCamelCase: Dict="gelu" , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Dict=0.1 , UpperCamelCase: Union[str, Any]=10 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: str=0.9 , UpperCamelCase: Any=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = patch_size A__ = tubelet_size A__ = num_frames A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = mask_ratio A__ = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame A__ = (image_size // patch_size) ** 2 A__ = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos A__ = int(mask_ratio * self.seq_length ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self: Optional[int] ): """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def UpperCamelCase ( self: Any , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Tuple ): """simple docstring""" A__ = VideoMAEModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] ): """simple docstring""" A__ = VideoMAEForPreTraining(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch A__ = torch.ones((self.num_masks,) ) A__ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) A__ = mask.expand(self.batch_size , -1 ).bool() A__ = model(UpperCamelCase , UpperCamelCase ) # model only returns predictions for masked patches A__ = mask.sum().item() A__ = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) UpperCAmelCase = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = VideoMAEModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def UpperCamelCase ( self: str , UpperCamelCase: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any]=False ): """simple docstring""" A__ = copy.deepcopy(UpperCamelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch A__ = torch.ones((self.model_tester.num_masks,) ) A__ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) A__ = mask.expand(self.model_tester.batch_size , -1 ).bool() A__ = bool_masked_pos.to(UpperCamelCase ) if return_labels: if model_class in [ *get_values(UpperCamelCase ), ]: A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return inputs_dict def UpperCamelCase ( self: List[str] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" ) def UpperCamelCase ( self: Dict ): """simple docstring""" pass def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) @slow def UpperCamelCase ( self: Tuple ): """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = VideoMAEModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def UpperCamelCase ( self: Tuple ): """simple docstring""" if not self.has_attentions: pass else: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: A__ = self.model_tester.seq_length - self.model_tester.num_masks A__ = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) A__ = True A__ = False A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) A__ = len(UpperCamelCase ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ): A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.hidden_states A__ = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) A__ = self.model_tester.seq_length - self.model_tester.num_masks A__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True 
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" pass def _snake_case ( ): A__ = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) A__ = np.load(UpperCAmelCase_ ) return list(UpperCAmelCase_ ) @require_torch @require_vision class a ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase ( self: Tuple ): """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def UpperCamelCase ( self: Dict ): """simple docstring""" A__ = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to( UpperCamelCase ) A__ = self.default_image_processor A__ = prepare_video() A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase ) # verify the logits A__ = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) A__ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(UpperCamelCase ) A__ = self.default_image_processor A__ = prepare_video() A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # add boolean mask, indicating which patches to mask A__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) A__ = torch.load(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase ) # verify the logits A__ = torch.Size([1, 14_08, 15_36] ) A__ = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) A__ = torch.tensor([0.5_142] , device=UpperCamelCase ) self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=UpperCamelCase ).to( UpperCamelCase ) with torch.no_grad(): A__ = model(**UpperCamelCase ) A__ = torch.tensor(torch.tensor([0.6_469] ) , device=UpperCamelCase ) self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
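# Hedged sketch of the `bool_masked_pos` construction used repeatedly in the VideoMAE tests
# above: every example in a batch must mask the same number of patches, so a single mask is
# built once and expanded across the batch. The concrete sizes below are illustrative, not
# taken from any checkpoint.
import torch

seq_length = 1568                      # e.g. (num_frames // tubelet_size) * patches_per_frame
num_masks = int(0.9 * seq_length)      # mask_ratio = 0.9, as in the model tester
batch_size = 2

mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.sum(dim=1).eq(num_masks).all()   # identical count per example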
69
0
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def __a ( __lowerCamelCase ): return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code ) class A_ (lowercase__ ): '''simple docstring''' @staticmethod def UpperCamelCase__ ( lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = parser.add_parser("download" ) download_parser.add_argument( "--cache-dir" , type=lowercase_ , default=lowercase_ , help="Path to location to store the models" ) download_parser.add_argument( "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" ) download_parser.add_argument( "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , ) download_parser.add_argument("model" , type=lowercase_ , help="Name of the model to download" ) download_parser.set_defaults(func=lowercase_ ) def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = model UpperCAmelCase_ : Union[str, Any] = cache UpperCAmelCase_ : Union[str, Any] = force UpperCAmelCase_ : Union[str, Any] = trust_remote_code def UpperCamelCase__ ( self ): """simple docstring""" from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
61
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): return round(float(moles / volume ) * nfactor ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): return round(float((moles * 0.0821 * temperature) / (volume) ) ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
61
1
"""simple docstring""" import math class A__ : def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = 0.0 __lowerCAmelCase : Optional[int] = 0.0 for i in range(len(_SCREAMING_SNAKE_CASE ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for i in range(len(_SCREAMING_SNAKE_CASE ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __lowerCAmelCase (): # Training Examples ( m, n ) __lowerCAmelCase : Union[str, Any] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) __lowerCAmelCase : List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training __lowerCAmelCase : Dict = SelfOrganizingMap() __lowerCAmelCase : List[str] = 3 __lowerCAmelCase : List[Any] = 0.5 for _ in range(_UpperCamelCase ): for j in range(len(_UpperCamelCase ) ): # training sample __lowerCAmelCase : Optional[Any] = training_samples[j] # Compute the winning vector __lowerCAmelCase : str = self_organizing_map.get_winner(_UpperCamelCase , _UpperCamelCase ) # Update the winning vector __lowerCAmelCase : Optional[int] = self_organizing_map.update(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # classify test sample __lowerCAmelCase : Optional[Any] = [0, 0, 0, 1] __lowerCAmelCase : List[str] = self_organizing_map.get_winner(_UpperCamelCase , _UpperCamelCase ) # results print(F"Clusters that the test sample belongs to : {winner}" ) print(F"Weights that have been trained : {weights}" ) # running the main() function if __name__ == "__main__": main()
182
"""simple docstring""" import argparse import datetime def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[Any] = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } __lowerCAmelCase : Optional[Any] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(_UpperCamelCase ) < 11: raise ValueError('Must be 10 characters long' ) # Get month __lowerCAmelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) __lowerCAmelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day __lowerCAmelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator __lowerCAmelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year __lowerCAmelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation __lowerCAmelCase : Tuple = datetime.date(int(_UpperCamelCase ) , int(_UpperCamelCase ) , int(_UpperCamelCase ) ) # Start math if m <= 2: __lowerCAmelCase : int = y - 1 __lowerCAmelCase : Tuple = m + 12 # maths var __lowerCAmelCase : int = int(str(_UpperCamelCase )[:2] ) __lowerCAmelCase : int = int(str(_UpperCamelCase )[2:] ) __lowerCAmelCase : int = int(2.6 * m - 5.39 ) __lowerCAmelCase : int = int(c / 4 ) __lowerCAmelCase : int = int(k / 4 ) __lowerCAmelCase : int = int(d + k ) __lowerCAmelCase : int = int(t + u + v + x ) __lowerCAmelCase : int = int(z - (2 * c) ) __lowerCAmelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' ) # Response __lowerCAmelCase : str = F"Your date {date_input}, is a {days[str(_UpperCamelCase )]}!" return response if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowerCamelCase__ = parser.parse_args() zeller(args.date_input)
182
1
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] ="EncodecFeatureExtractor" SCREAMING_SNAKE_CASE_ : List[Any] =("T5Tokenizer", "T5TokenizerFast") def __init__( self : Optional[int] , __A : Optional[Any] , __A : List[Any] ): super().__init__(__A , __A ) __UpperCamelCase = self.feature_extractor __UpperCamelCase = False def _lowerCamelCase ( self : Dict , __A : Dict=None , __A : Dict=None , __A : Union[str, Any]=True ): return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A ) def __call__( self : Union[str, Any] , *__A : List[Any] , **__A : List[Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__A , **__A ) __UpperCamelCase = kwargs.pop('audio' , __A ) __UpperCamelCase = kwargs.pop('sampling_rate' , __A ) __UpperCamelCase = kwargs.pop('text' , __A ) if len(__A ) > 0: __UpperCamelCase = args[0] __UpperCamelCase = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if text is not None: __UpperCamelCase = self.tokenizer(__A , **__A ) if audio is not None: __UpperCamelCase = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCamelCase = audio_inputs['input_values'] if "padding_mask" in audio_inputs: __UpperCamelCase = audio_inputs['padding_mask'] return inputs def _lowerCamelCase ( self : Optional[Any] , *__A : str , **__A : Dict ): __UpperCamelCase = kwargs.pop('audio' , __A ) __UpperCamelCase = kwargs.pop('padding_mask' , __A ) if len(__A ) > 0: __UpperCamelCase = args[0] __UpperCamelCase = args[1:] if audio_values is not None: return self._decode_audio(__A , padding_mask=__A ) else: return self.tokenizer.batch_decode(*__A , **__A ) def _lowerCamelCase ( self : str , *__A : List[str] , **__A : List[Any] ): return self.tokenizer.decode(*__A , **__A ) def _lowerCamelCase ( self : Dict , __A : List[Any] , __A : Optional = None ): __UpperCamelCase = to_numpy(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = audio_values.shape if padding_mask is None: return list(__A ) __UpperCamelCase = to_numpy(__A ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCamelCase = seq_len - padding_mask.shape[-1] __UpperCamelCase = 1 - self.feature_extractor.padding_value __UpperCamelCase = np.pad(__A , ((0, 0), (0, difference)) , 'constant' , constant_values=__A ) __UpperCamelCase = audio_values.tolist() for i in range(__A ): __UpperCamelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCamelCase = sliced_audio.reshape(__A , -1 ) return audio_values
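# Standalone numpy sketch of the padding-mask trimming idea implemented in _decode_audio
# above: positions whose mask entry equals the feature extractor's padding value are sliced
# away per example before the audio is returned. The shapes and the padding value (0.0)
# are assumptions for illustration only.
import numpy as np

padding_value = 0.0
audio_values = np.array([[[0.1, 0.2, 0.3, 0.0, 0.0]]])   # (batch=1, channels=1, seq_len=5)
padding_mask = np.array([[1, 1, 1, 0, 0]])               # 1 = real sample, 0 = padding

trimmed = audio_values[0][:, padding_mask[0] != padding_value]
print(trimmed)   # [[0.1 0.2 0.3]]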
53
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
1
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float = 1 / sqrt(2 ) ) -> IIRFilter: _UpperCAmelCase : Dict = tau * frequency / samplerate _UpperCAmelCase : List[str] = sin(lowerCAmelCase ) _UpperCAmelCase : Tuple = cos(lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor) _UpperCAmelCase : List[Any] = (1 - _cos) / 2 _UpperCAmelCase : List[Any] = 1 - _cos _UpperCAmelCase : int = 1 + alpha _UpperCAmelCase : Tuple = -2 * _cos _UpperCAmelCase : Optional[Any] = 1 - alpha _UpperCAmelCase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float = 1 / sqrt(2 ) ) -> IIRFilter: _UpperCAmelCase : Optional[int] = tau * frequency / samplerate _UpperCAmelCase : List[Any] = sin(lowerCAmelCase ) _UpperCAmelCase : Any = cos(lowerCAmelCase ) _UpperCAmelCase : str = _sin / (2 * q_factor) _UpperCAmelCase : List[Any] = (1 + _cos) / 2 _UpperCAmelCase : Dict = -1 - _cos _UpperCAmelCase : int = 1 + alpha _UpperCAmelCase : Any = -2 * _cos _UpperCAmelCase : int = 1 - alpha _UpperCAmelCase : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float = 1 / sqrt(2 ) ) -> IIRFilter: _UpperCAmelCase : Tuple = tau * frequency / samplerate _UpperCAmelCase : Any = sin(lowerCAmelCase ) _UpperCAmelCase : List[Any] = cos(lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = _sin / (2 * q_factor) _UpperCAmelCase : Union[str, Any] = _sin / 2 _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Tuple = -ba _UpperCAmelCase : Union[str, Any] = 1 + alpha _UpperCAmelCase : Tuple = -2 * _cos _UpperCAmelCase : Tuple = 1 - alpha _UpperCAmelCase : int = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float = 1 / sqrt(2 ) ) -> IIRFilter: _UpperCAmelCase : Any = tau * frequency / samplerate _UpperCAmelCase : Union[str, Any] = sin(lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = cos(lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor) _UpperCAmelCase : Any = 1 - alpha _UpperCAmelCase : int = -2 * _cos _UpperCAmelCase : Any = 1 + alpha _UpperCAmelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: float = 1 / sqrt(2 ) , ) -> IIRFilter: _UpperCAmelCase : Any = tau * frequency / samplerate _UpperCAmelCase : Optional[int] = sin(lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = cos(lowerCAmelCase ) _UpperCAmelCase : Optional[int] = _sin / (2 * q_factor) _UpperCAmelCase : List[str] = 10 ** (gain_db / 40) _UpperCAmelCase : Dict = 1 + alpha * big_a _UpperCAmelCase : Union[str, Any] = -2 * _cos _UpperCAmelCase : Optional[Any] = 1 - alpha * big_a _UpperCAmelCase : int = 1 + alpha / big_a _UpperCAmelCase : str = -2 * _cos _UpperCAmelCase : List[Any] = 1 - alpha / big_a _UpperCAmelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: float = 1 / sqrt(2 ) , ) -> IIRFilter: _UpperCAmelCase : Tuple = tau * 
frequency / samplerate _UpperCAmelCase : Dict = sin(lowerCAmelCase ) _UpperCAmelCase : Optional[int] = cos(lowerCAmelCase ) _UpperCAmelCase : str = _sin / (2 * q_factor) _UpperCAmelCase : str = 10 ** (gain_db / 40) _UpperCAmelCase : Any = (big_a + 1) - (big_a - 1) * _cos _UpperCAmelCase : int = (big_a + 1) + (big_a - 1) * _cos _UpperCAmelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos _UpperCAmelCase : Dict = (big_a - 1) + (big_a + 1) * _cos _UpperCAmelCase : Union[str, Any] = 2 * sqrt(lowerCAmelCase ) * alpha _UpperCAmelCase : int = big_a * (pmc + aaa) _UpperCAmelCase : Optional[int] = 2 * big_a * mpc _UpperCAmelCase : str = big_a * (pmc - aaa) _UpperCAmelCase : List[Any] = ppmc + aaa _UpperCAmelCase : int = -2 * pmpc _UpperCAmelCase : int = ppmc - aaa _UpperCAmelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: float = 1 / sqrt(2 ) , ) -> IIRFilter: _UpperCAmelCase : List[Any] = tau * frequency / samplerate _UpperCAmelCase : int = sin(lowerCAmelCase ) _UpperCAmelCase : List[str] = cos(lowerCAmelCase ) _UpperCAmelCase : List[Any] = _sin / (2 * q_factor) _UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40) _UpperCAmelCase : int = (big_a + 1) - (big_a - 1) * _cos _UpperCAmelCase : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos _UpperCAmelCase : Dict = (big_a - 1) - (big_a + 1) * _cos _UpperCAmelCase : List[Any] = (big_a - 1) + (big_a + 1) * _cos _UpperCAmelCase : Tuple = 2 * sqrt(lowerCAmelCase ) * alpha _UpperCAmelCase : Any = big_a * (ppmc + aaa) _UpperCAmelCase : str = -2 * big_a * pmpc _UpperCAmelCase : List[str] = big_a * (ppmc - aaa) _UpperCAmelCase : Any = pmc + aaa _UpperCAmelCase : Any = 2 * mpc _UpperCAmelCase : Union[str, Any] = pmc - aaa _UpperCAmelCase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
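# Self-contained check of the low-pass coefficients derived in the first helper above,
# run through a direct-form I biquad that is re-implemented inline, so the snippet does
# not depend on audio_filters.iir_filter. Sample rate, cutoff and Q are illustrative.
from math import cos, sin, sqrt, tau

samplerate, frequency, q_factor = 48_000, 1_000.0, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b = [(1 - cos(w0)) / 2, 1 - cos(w0), (1 - cos(w0)) / 2]   # feed-forward (b0, b1, b2)
a = [1 + alpha, -2 * cos(w0), 1 - alpha]                  # feed-back  (a0, a1, a2)

x1 = x2 = y1 = y2 = 0.0
impulse_response = []
for x0 in [1.0, 0.0, 0.0, 0.0]:                           # unit impulse
    y0 = (b[0] * x0 + b[1] * x1 + b[2] * x2 - a[1] * y1 - a[2] * y2) / a[0]
    x2, x1, y2, y1 = x1, x0, y1, y0
    impulse_response.append(y0)
print(impulse_response)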
189
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    uses bottom-up dynamic programming solution for matching the input
    string with a given pattern.

    Runtime: O(len(input_string) * len(pattern))
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
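# A few extra sanity cases for match_pattern ("." matches any single character,
# "x*" matches zero or more occurrences of "x"):
#
#   >>> match_pattern("aaab", ".*")
#   True
#   >>> match_pattern("aaa", "aa")
#   False
#   >>> match_pattern("dabc", "*abc")
#   False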
189
1
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as input_file: UpperCAmelCase : List[str] = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" ) UpperCAmelCase : str = input_file.read() UpperCAmelCase : int = regexp.search(_SCREAMING_SNAKE_CASE ) return match def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as input_file: UpperCAmelCase : Union[str, Any] = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL ) UpperCAmelCase : Optional[Any] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase : int = regexp.finditer(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : Dict = Path("""./datasets""" ) UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(_SCREAMING_SNAKE_CASE ) ): raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any = Path("""./datasets""" ) UpperCAmelCase : Optional[Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_print_statements(str(_SCREAMING_SNAKE_CASE ) ): raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
109
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) _UpperCAmelCase : List[str] = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : List[str] = "gpt_neox" def __init__( self , A_=50_432 , A_=6_144 , A_=44 , A_=64 , A_=24_576 , A_="gelu" , A_=0.25 , A_=10_000 , A_=0.0 , A_=0.0 , A_=0.1 , A_=2_048 , A_=0.02 , A_=1e-5 , A_=True , A_=0 , A_=2 , A_=False , A_=True , A_=None , **A_ , ) -> Tuple: """simple docstring""" super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase = vocab_size UpperCamelCase = max_position_embeddings UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = rotary_pct UpperCamelCase = rotary_emb_base UpperCamelCase = attention_dropout UpperCamelCase = hidden_dropout UpperCamelCase = classifier_dropout UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = use_cache UpperCamelCase = tie_word_embeddings UpperCamelCase = use_parallel_residual UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( 'The hidden size is not divisble by the number of attention heads! Make sure to update them!' ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A_ ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F'''got {self.rope_scaling}''' ) UpperCamelCase = self.rope_scaling.get('type' , A_ ) UpperCamelCase = self.rope_scaling.get('factor' , A_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(A_ , A_ ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
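# Hedged usage sketch of the rope_scaling validation performed by GPTNeoXConfig above;
# the values are illustrative. A well-formed dict passes, while a factor <= 1.0 raises:
from transformers import GPTNeoXConfig

ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)   # the factor field must be a float > 1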
222
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if not len(_UpperCamelCase ) == len(_UpperCamelCase ) == 3: raise ValueError("Please enter a valid equation." ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("Both a & b of two equations can't be zero." ) # Extract the coefficients __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = equationa __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = equationa # Calculate the determinants of the matrices __lowerCAmelCase = aa * ba - aa * ba __lowerCAmelCase = ca * ba - ca * ba __lowerCAmelCase = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("Infinite solutions. (Consistent system)" ) else: raise ValueError("No solution. (Inconsistent system)" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: __lowerCAmelCase = determinant_x / determinant __lowerCAmelCase = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
259
"""simple docstring""" from __future__ import annotations import time A : Union[str, Any] = list[tuple[int, int]] A : int = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A : int = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a ): __lowerCAmelCase = pos_x __lowerCAmelCase = pos_y __lowerCAmelCase = (pos_y, pos_x) __lowerCAmelCase = goal_x __lowerCAmelCase = goal_y __lowerCAmelCase = parent class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a ): __lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , __a ) __lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , __a ) __lowerCAmelCase = [self.start] __lowerCAmelCase = False def snake_case ( self ): while self.node_queue: __lowerCAmelCase = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: __lowerCAmelCase = True return self.retrace_path(__a ) __lowerCAmelCase = self.get_successors(__a ) for node in successors: self.node_queue.append(__a ) if not self.reached: return [self.start.pos] return None def snake_case ( self , __a ): __lowerCAmelCase = [] for action in delta: __lowerCAmelCase = parent.pos_x + action[1] __lowerCAmelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__a , __a , self.target.pos_y , self.target.pos_x , __a ) ) return successors def snake_case ( self , __a ): __lowerCAmelCase = node __lowerCAmelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __lowerCAmelCase = current_node.parent path.reverse() return path class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a ): __lowerCAmelCase = BreadthFirstSearch(__a , __a ) __lowerCAmelCase = BreadthFirstSearch(__a , __a ) __lowerCAmelCase = False def snake_case ( self ): while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: __lowerCAmelCase = self.fwd_bfs.node_queue.pop(0 ) __lowerCAmelCase = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: __lowerCAmelCase = True return self.retrace_bidirectional_path( __a , __a ) __lowerCAmelCase = current_bwd_node __lowerCAmelCase = current_fwd_node __lowerCAmelCase = { self.fwd_bfs: self.fwd_bfs.get_successors(__a ), self.bwd_bfs: self.bwd_bfs.get_successors(__a ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__a ) if not self.reached: return [self.fwd_bfs.start.pos] return None def snake_case ( self , __a , __a ): __lowerCAmelCase = self.fwd_bfs.retrace_path(__a ) __lowerCAmelCase = self.bwd_bfs.retrace_path(__a ) bwd_path.pop() bwd_path.reverse() __lowerCAmelCase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() A : List[Any] = (0, 0) A : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A : Any = time.time() A : Dict = BreadthFirstSearch(init, goal) A : Any = bfs.search() A : List[str] = time.time() - start_bfs_time print("Unidirectional BFS computation time : ", bfs_time) A : Optional[Any] = time.time() A : Optional[int] = BidirectionalBreadthFirstSearch(init, goal) A : Any = bd_bfs.search() A : str = time.time() - 
start_bd_bfs_time print("Bidirectional BFS computation time : ", bd_bfs_time)
259
1
"""simple docstring""" import copy import re class a : _snake_case : Any = 'hp' _snake_case : Dict = {} _snake_case : Dict = None @classmethod def lowerCAmelCase_ ( cls : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = prefix _UpperCAmelCase = defaults cls.build_naming_info() @staticmethod def lowerCAmelCase_ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ): if len(__lowerCAmelCase ) == 0: return "" _UpperCAmelCase = None if any(char.isdigit() for char in word ): raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__lowerCAmelCase ) + 1 ): _UpperCAmelCase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _UpperCAmelCase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__lowerCAmelCase : str ): _UpperCAmelCase = """""" while integer != 0: _UpperCAmelCase = chr(ord("""A""" ) + integer % 10 ) + s integer //= 10 return s _UpperCAmelCase = 0 while True: _UpperCAmelCase = word + """#""" + int_to_alphabetic(__lowerCAmelCase ) if sword in info["reverse_short_word"]: continue else: _UpperCAmelCase = sword break _UpperCAmelCase = short_word _UpperCAmelCase = word return short_word @staticmethod def lowerCAmelCase_ ( __lowerCAmelCase : Dict , __lowerCAmelCase : int ): _UpperCAmelCase = param_name.split("""_""" ) _UpperCAmelCase = [TrialShortNamer.shortname_for_word(__lowerCAmelCase , __lowerCAmelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _UpperCAmelCase = ["""""", """_"""] for separator in separators: _UpperCAmelCase = separator.join(__lowerCAmelCase ) if shortname not in info["reverse_short_param"]: _UpperCAmelCase = shortname _UpperCAmelCase = param_name return shortname return param_name @staticmethod def lowerCAmelCase_ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = TrialShortNamer.shortname_for_key(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = short_name _UpperCAmelCase = param_name @classmethod def lowerCAmelCase_ ( cls : List[Any] ): if cls.NAMING_INFO is not None: return _UpperCAmelCase = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } _UpperCAmelCase = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = info @classmethod def lowerCAmelCase_ ( cls : Dict , __lowerCAmelCase : Optional[Any] ): cls.build_naming_info() assert cls.PREFIX is not None _UpperCAmelCase = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _UpperCAmelCase = cls.NAMING_INFO["""short_param"""][k] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = 1 if v else 0 _UpperCAmelCase = """""" if isinstance(__lowerCAmelCase , (int, float) ) else """-""" _UpperCAmelCase = f'''{key}{sep}{v}''' name.append(__lowerCAmelCase ) return "_".join(__lowerCAmelCase ) @classmethod def lowerCAmelCase_ ( cls : Optional[int] , __lowerCAmelCase : Dict ): _UpperCAmelCase = repr[len(cls.PREFIX ) + 1 :] if repr == "": _UpperCAmelCase = [] else: _UpperCAmelCase = repr.split("""_""" ) _UpperCAmelCase = {} for value 
in values: if "-" in value: _UpperCAmelCase = value.split("""-""" ) else: _UpperCAmelCase = re.sub("""[0-9.]""" , """""" , __lowerCAmelCase ) _UpperCAmelCase = float(re.sub("""[^0-9.]""" , """""" , __lowerCAmelCase ) ) _UpperCAmelCase = cls.NAMING_INFO["""reverse_short_param"""][p_k] _UpperCAmelCase = p_v for k in cls.DEFAULTS: if k not in parameters: _UpperCAmelCase = cls.DEFAULTS[k] return parameters
289
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Dict = {"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
120
0
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Any ) -> int: """simple docstring""" lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = SamImageProcessor() lowerCamelCase_ = SamProcessor(A_ ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Any , **A_ : int ) -> int: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase_ = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : int ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) lowerCamelCase_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def a__ ( self : int ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = SamProcessor(image_processor=A_ ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(A_ , return_tensors='np' ) lowerCamelCase_ = processor(images=A_ , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = SamProcessor(image_processor=A_ ) lowerCamelCase_ = [torch.ones((1, 3, 5, 5) )] lowerCamelCase_ = [[1764, 2646]] lowerCamelCase_ = [[683, 1024]] lowerCamelCase_ = processor.post_process_masks(A_ , A_ , A_ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) lowerCamelCase_ = processor.post_process_masks( A_ , torch.tensor(A_ ) , torch.tensor(A_ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np lowerCamelCase_ = [np.ones((1, 3, 5, 5) )] lowerCamelCase_ = processor.post_process_masks(A_ , np.array(A_ ) , np.array(A_ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) lowerCamelCase_ = [[1, 0], [0, 1]] with self.assertRaises(A_ ): lowerCamelCase_ = processor.post_process_masks(A_ , np.array(A_ ) , np.array(A_ ) ) @require_vision @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" 
lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = SamImageProcessor() lowerCamelCase_ = SamProcessor(A_ ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : List[Any] , **A_ : int ) -> Dict: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase_ = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Tuple ) -> int: """simple docstring""" lowerCamelCase_ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) lowerCamelCase_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = SamProcessor(image_processor=A_ ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(A_ , return_tensors='np' ) lowerCamelCase_ = processor(images=A_ , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def a__ ( self : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = SamProcessor(image_processor=A_ ) lowerCamelCase_ = [tf.ones((1, 3, 5, 5) )] lowerCamelCase_ = [[1764, 2646]] lowerCamelCase_ = [[683, 1024]] lowerCamelCase_ = processor.post_process_masks(A_ , A_ , A_ , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) lowerCamelCase_ = processor.post_process_masks( A_ , tf.convert_to_tensor(A_ ) , tf.convert_to_tensor(A_ ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np lowerCamelCase_ = [np.ones((1, 3, 5, 5) )] lowerCamelCase_ = processor.post_process_masks( A_ , np.array(A_ ) , np.array(A_ ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) lowerCamelCase_ = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): lowerCamelCase_ = processor.post_process_masks( A_ , np.array(A_ ) , np.array(A_ ) , return_tensors='tf' ) @require_vision @require_torchvision class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : str ) -> List[Any]: """simple docstring""" lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = SamImageProcessor() lowerCamelCase_ = SamProcessor(A_ ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : List[str] , **A_ : Optional[int] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor def a__ ( self : Dict ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , 
dtype=np.uinta )] lowerCamelCase_ = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = SamProcessor(image_processor=A_ ) lowerCamelCase_ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) lowerCamelCase_ = [tf.convert_to_tensor(A_ )] lowerCamelCase_ = [torch.tensor(A_ )] lowerCamelCase_ = [[1764, 2646]] lowerCamelCase_ = [[683, 1024]] lowerCamelCase_ = processor.post_process_masks( A_ , A_ , A_ , return_tensors='tf' ) lowerCamelCase_ = processor.post_process_masks( A_ , A_ , A_ , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = SamProcessor(image_processor=A_ ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(A_ , return_tensors='pt' )['pixel_values'].numpy() lowerCamelCase_ = processor(images=A_ , return_tensors='pt' )['pixel_values'].numpy() lowerCamelCase_ = image_processor(A_ , return_tensors='tf' )['pixel_values'].numpy() lowerCamelCase_ = processor(images=A_ , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(A_ , A_ ) ) self.assertTrue(np.allclose(A_ , A_ ) ) self.assertTrue(np.allclose(A_ , A_ ) )
208
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCamelCase : Optional[Any] = False lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : Any = "ybelkada/fonts" def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ 'Pix2StructImageProcessor. Please upgrade torch.' ) def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ): '''simple docstring''' requires_backends(lowercase , ['torch'] ) _check_torch_version() lowerCamelCase_ = image_tensor.unsqueeze(0 ) lowerCamelCase_ = torch.nn.functional.unfold(lowercase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) lowerCamelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowercase , lowercase , -1 ) lowerCamelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : int = 36 , lowercase : str = "black" , lowercase : str = "white" , lowercase : int = 5 , lowercase : int = 5 , lowercase : int = 5 , lowercase : int = 5 , lowercase : Optional[bytes] = None , lowercase : Optional[str] = None , ): '''simple docstring''' requires_backends(lowercase , 'vision' ) # Add new lines so that each line is no more than 80 characters. lowerCamelCase_ = textwrap.TextWrapper(width=80 ) lowerCamelCase_ = wrapper.wrap(text=lowercase ) lowerCamelCase_ = '\n'.join(lowercase ) if font_bytes is not None and font_path is None: lowerCamelCase_ = io.BytesIO(lowercase ) elif font_path is not None: lowerCamelCase_ = font_path else: lowerCamelCase_ = hf_hub_download(lowercase , 'Arial.TTF' ) lowerCamelCase_ = ImageFont.truetype(lowercase , encoding='UTF-8' , size=lowercase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. lowerCamelCase_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , lowercase ) ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = temp_draw.textbbox((0, 0) , lowercase , lowercase ) # Create the actual image with a bit of padding around the text. 
lowerCamelCase_ = text_width + left_padding + right_padding lowerCamelCase_ = text_height + top_padding + bottom_padding lowerCamelCase_ = Image.new('RGB' , (image_width, image_height) , lowercase ) lowerCamelCase_ = ImageDraw.Draw(lowercase ) draw.text(xy=(left_padding, top_padding) , text=lowercase , fill=lowercase , font=lowercase ) return image def _SCREAMING_SNAKE_CASE ( lowercase : np.ndarray , lowercase : str , **lowercase : List[Any] ): '''simple docstring''' requires_backends(lowercase , 'vision' ) # Convert to PIL image if necessary lowerCamelCase_ = to_pil_image(lowercase ) lowerCamelCase_ = render_text(lowercase , **lowercase ) lowerCamelCase_ = max(header_image.width , image.width ) lowerCamelCase_ = int(image.height * (new_width / image.width) ) lowerCamelCase_ = int(header_image.height * (new_width / header_image.width) ) lowerCamelCase_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary lowerCamelCase_ = to_numpy_array(lowercase ) if infer_channel_dimension_format(lowercase ) == ChannelDimension.LAST: lowerCamelCase_ = to_channel_dimension_format(lowercase , ChannelDimension.LAST ) return new_image class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = ['''flattened_patches'''] def __init__( self : Dict , A_ : bool = True , A_ : bool = True , A_ : Dict[str, int] = None , A_ : int = 2048 , A_ : bool = False , **A_ : str , ) -> None: """simple docstring""" super().__init__(**A_ ) lowerCamelCase_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} lowerCamelCase_ = do_normalize lowerCamelCase_ = do_convert_rgb lowerCamelCase_ = max_patches lowerCamelCase_ = is_vqa def a__ ( self : Union[str, Any] , A_ : np.ndarray , A_ : int , A_ : dict , **A_ : Any ) -> np.ndarray: """simple docstring""" requires_backends(self.extract_flattened_patches , 'torch' ) _check_torch_version() # convert to torch lowerCamelCase_ = to_channel_dimension_format(A_ , ChannelDimension.FIRST ) lowerCamelCase_ = torch.from_numpy(A_ ) lowerCamelCase_ , lowerCamelCase_ = patch_size['height'], patch_size['width'] lowerCamelCase_ , lowerCamelCase_ = get_image_size(A_ ) # maximize scale s.t. 
lowerCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) lowerCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , A_ ) , 1 ) lowerCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , A_ ) , 1 ) lowerCamelCase_ = max(num_feasible_rows * patch_height , 1 ) lowerCamelCase_ = max(num_feasible_cols * patch_width , 1 ) lowerCamelCase_ = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=A_ , antialias=A_ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] lowerCamelCase_ = torch_extract_patches(A_ , A_ , A_ ) lowerCamelCase_ = patches.shape lowerCamelCase_ = patches_shape[1] lowerCamelCase_ = patches_shape[2] lowerCamelCase_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] lowerCamelCase_ = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] lowerCamelCase_ = torch.arange(A_ ).reshape([rows, 1] ).repeat(1 , A_ ).reshape([rows * columns, 1] ) lowerCamelCase_ = torch.arange(A_ ).reshape([1, columns] ).repeat(A_ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] lowerCamelCase_ = row_ids.to(torch.floataa ) lowerCamelCase_ = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] lowerCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] lowerCamelCase_ = torch.nn.functional.pad(A_ , [0, 0, 0, max_patches - (rows * columns)] ).float() lowerCamelCase_ = to_numpy_array(A_ ) return result def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str ) -> np.ndarray: """simple docstring""" if image.dtype == np.uinta: lowerCamelCase_ = image.astype(np.floataa ) # take mean across the whole `image` lowerCamelCase_ = np.mean(A_ ) lowerCamelCase_ = np.std(A_ ) lowerCamelCase_ = max(A_ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(A_ , mean=A_ , std=A_ , **A_ ) def a__ ( self : Optional[Any] , A_ : ImageInput , A_ : Optional[str] = None , A_ : bool = None , A_ : Optional[bool] = None , A_ : Optional[int] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Optional[int] , ) -> ImageInput: """simple docstring""" lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCamelCase_ = patch_size if patch_size is not None else self.patch_size lowerCamelCase_ = max_patches if max_patches is not None else self.max_patches lowerCamelCase_ = self.is_vqa if kwargs.get('data_format' , A_ ) is not None: raise ValueError('data_format is not an accepted input as the outputs are ' ) lowerCamelCase_ = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCamelCase_ = [convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. 
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.' ) lowerCamelCase_ = kwargs.pop('font_bytes' , A_ ) lowerCamelCase_ = kwargs.pop('font_path' , A_ ) if isinstance(A_ , A_ ): lowerCamelCase_ = [header_text] * len(A_ ) lowerCamelCase_ = [ render_header(A_ , header_text[i] , font_bytes=A_ , font_path=A_ ) for i, image in enumerate(A_ ) ] if do_normalize: lowerCamelCase_ = [self.normalize(image=A_ ) for image in images] # convert to torch tensor and permute lowerCamelCase_ = [ self.extract_flattened_patches(image=A_ , max_patches=A_ , patch_size=A_ ) for image in images ] # create attention mask in numpy lowerCamelCase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] lowerCamelCase_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=A_ ) return encoded_outputs
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# find the commit at which the current branch forked from main
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# list every file changed since that fork point
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
# keep only .py files that live under the requested top-level sub-dirs
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level (-255.0 to 255.0).
    """

    def brightness(c: int) -> float:
        # Fundamental transformation applied to every pixel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
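# Usage sketch (illustrative, not part of the original file): load a VQGAN and
# round-trip a dummy image batch through it. The config/checkpoint paths are the
# defaults above and are assumed to exist locally.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)
#   x = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a real image batch
#   xrec = reconstruct_with_vqgan(x, vqgan)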
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def snake_case_(_UpperCamelCase ) -> Optional[int]: """simple docstring""" _snake_case = checkpoints.load_tax_checkpoint(_UpperCamelCase ) _snake_case = flatten_dict(_UpperCamelCase ) return flax_params def snake_case_(_UpperCamelCase ) -> List[str]: """simple docstring""" _snake_case = {} _snake_case = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } _snake_case = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key _snake_case = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): _snake_case = new_key.replace(_UpperCamelCase , _UpperCamelCase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): _snake_case = new_key.replace(_UpperCamelCase , _UpperCamelCase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number _snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , _UpperCamelCase ) _snake_case = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number _snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , _UpperCamelCase ) _snake_case = flax_dict[key] _snake_case = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): _snake_case = torch.from_numpy(converted_dict[key].T ) else: _snake_case = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> List[Any]: """simple docstring""" _snake_case = get_flax_param(_UpperCamelCase ) if not use_large: _snake_case = PixaStructVisionConfig() _snake_case = PixaStructTextConfig() else: _snake_case = PixaStructVisionConfig( hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 ) _snake_case = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 ) _snake_case = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_UpperCamelCase ) _snake_case = 
PixaStructForConditionalGeneration(_UpperCamelCase ) _snake_case = rename_and_convert_flax_params(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) _snake_case = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) _snake_case = PixaStructImageProcessor() _snake_case = PixaStructProcessor(image_processor=_UpperCamelCase , tokenizer=_UpperCamelCase ) if use_large: _snake_case = 4_096 _snake_case = True # mkdir if needed os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) print('''Model saved in {}'''.format(_UpperCamelCase ) ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') __A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space; returns the Manhattan (L1) distance between them.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as manhattan_distance, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
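# Illustrative calls (the values are chosen for this example, not from the original file):
#   manhattan_distance([1, 1], [2, 2])                   -> 2.0
#   manhattan_distance_one_liner([1, 4, 2], [7, 1, 11])  -> 18.0
#   manhattan_distance([1, 1], [2, 2, 2])                -> ValueError (dimension mismatch)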
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup A : str = logging.get_logger(__name__) class __A( a ): def __init__( self , **_snake_case ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''bs4'''] ) super().__init__(**_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a = [] __a = [] __a = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __a = parent.find_all(child.name , recursive=_snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) ) __a = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = BeautifulSoup(_snake_case , '''html.parser''' ) __a = [] __a = [] __a = [] for element in html_code.descendants: if type(_snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __a = html.unescape(_snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(_snake_case ) __a , __a = self.xpath_soup(_snake_case ) stringaxtag_seq.append(_snake_case ) stringaxsubs_seq.append(_snake_case ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = '''''' for tagname, subs in zip(_snake_case , _snake_case ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self , _snake_case ) -> BatchFeature: '''simple docstring''' __a = False # Check that strings has a valid type if isinstance(_snake_case , _snake_case ): __a = True elif isinstance(_snake_case , (list, tuple) ): if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ): __a = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F"""but is of type {type(_snake_case )}.""" ) __a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) ) if not is_batched: __a = [html_strings] # Get nodes + xpaths __a = [] __a = [] for html_string in html_strings: __a , __a , __a = self.get_three_from_single(_snake_case ) nodes.append(_snake_case ) __a = [] for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ): __a = self.construct_xpath(_snake_case , _snake_case ) xpath_strings.append(_snake_case ) xpaths.append(_snake_case ) # return as Dict __a = {'''nodes''': nodes, '''xpaths''': xpaths} __a = BatchFeature(data=_snake_case , tensor_type=_snake_case ) return encoded_inputs
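# Usage sketch (assumption: the class above corresponds to transformers'
# MarkupLMFeatureExtractor, which requires `bs4`/BeautifulSoup to be installed;
# the HTML string and expected outputs are illustrative):
#
#   from transformers import MarkupLMFeatureExtractor
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
#   encoding = feature_extractor(html_string)
#   # encoding["nodes"]  -> roughly [["Title", "Hello world"]]
#   # encoding["xpaths"] -> roughly [["/html/body/h1", "/html/body/p"]]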
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = data __UpperCAmelCase : int = previous __UpperCAmelCase : Union[str, Any] = next_node def __str__( self ) -> str: '''simple docstring''' return f'{self.data}' def __A ( self ) -> int: '''simple docstring''' return self.data def __A ( self ) -> List[str]: '''simple docstring''' return self.next def __A ( self ) -> str: '''simple docstring''' return self.previous class _A : def __init__( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = head def __iter__( self ) -> str: '''simple docstring''' return self def __A ( self ) -> str: '''simple docstring''' if not self.current: raise StopIteration else: __UpperCAmelCase : List[str] = self.current.get_data() __UpperCAmelCase : int = self.current.get_next() return value class _A : def __init__( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = None # First node in list __UpperCAmelCase : List[str] = None # Last node in list def __str__( self ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = self.head __UpperCAmelCase : Optional[int] = [] while current is not None: nodes.append(current.get_data() ) __UpperCAmelCase : Any = current.get_next() return " ".join(str(__UpperCAmelCase ) for node in nodes ) def __contains__( self , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.head while current: if current.get_data() == value: return True __UpperCAmelCase : Optional[Any] = current.get_next() return False def __iter__( self ) -> str: '''simple docstring''' return LinkedListIterator(self.head ) def __A ( self ) -> List[Any]: '''simple docstring''' if self.head: return self.head.get_data() return None def __A ( self ) -> Optional[Any]: '''simple docstring''' if self.tail: return self.tail.get_data() return None def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: __UpperCAmelCase : str = node __UpperCAmelCase : List[str] = node else: self.insert_before_node(self.head , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: self.set_head(__UpperCAmelCase ) else: self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[int] = Node(__UpperCAmelCase ) if self.head is None: self.set_head(__UpperCAmelCase ) else: self.set_tail(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Tuple = node __UpperCAmelCase : List[Any] = node.previous if node.get_previous() is None: __UpperCAmelCase : str = node_to_insert else: __UpperCAmelCase : Optional[Any] = node_to_insert __UpperCAmelCase : List[Any] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : List[str] = node __UpperCAmelCase : Union[str, Any] = node.next if node.get_next() is None: __UpperCAmelCase : Dict = node_to_insert else: __UpperCAmelCase : Any = node_to_insert __UpperCAmelCase : List[str] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Optional[Any] = Node(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.head while node: if current_position == position: 
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase ) return current_position += 1 __UpperCAmelCase : int = node.next self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Node: '''simple docstring''' __UpperCAmelCase : Dict = self.head while node: if node.get_data() == item: return node __UpperCAmelCase : List[str] = node.get_next() raise Exception("""Node not found""" ) def __A ( self , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' if (node := self.get_node(__UpperCAmelCase )) is not None: if node == self.head: __UpperCAmelCase : Optional[int] = self.head.get_next() if node == self.tail: __UpperCAmelCase : Union[str, Any] = self.tail.get_previous() self.remove_node_pointers(__UpperCAmelCase ) @staticmethod def __A ( __UpperCAmelCase ) -> None: '''simple docstring''' if node.get_next(): __UpperCAmelCase : Optional[Any] = node.previous if node.get_previous(): __UpperCAmelCase : int = node.next __UpperCAmelCase : Tuple = None __UpperCAmelCase : Union[str, Any] = None def __A ( self ) -> List[Any]: '''simple docstring''' return self.head is None def lowercase_ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowerCamelCase__ = logging.get_logger(__name__) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : str = set() __lowerCAmelCase : Any = [] def parse_line(_UpperCamelCase ): for line in fp: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowerCAmelCase : Any = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(_UpperCAmelCase ) > 0: __lowerCAmelCase : str = '\n'.join(_UpperCAmelCase ) # Only keep the warnings specified in `targets` if any(F": {x}: " in warning for x in targets ): selected_warnings.add(_UpperCAmelCase ) buffer.clear() continue else: __lowerCAmelCase : List[str] = line.strip() buffer.append(_UpperCAmelCase ) if from_gh: for filename in os.listdir(_UpperCAmelCase ): __lowerCAmelCase : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isdir(_UpperCAmelCase ): # read the file if filename != "warnings.txt": continue with open(_UpperCAmelCase ) as fp: parse_line(_UpperCAmelCase ) else: try: with zipfile.ZipFile(_UpperCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_UpperCAmelCase ): # read the file if filename != "warnings.txt": continue with z.open(_UpperCAmelCase ) as fp: parse_line(_UpperCAmelCase ) except Exception: logger.warning( F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Tuple = set() __lowerCAmelCase : Optional[int] = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(_UpperCAmelCase , _UpperCAmelCase ) ) return selected_warnings if __name__ == "__main__": def __lowerCAmelCase (_UpperCamelCase ): return values.split(',' ) lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowerCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in 
enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowerCamelCase__ = extract_warnings(args.output_dir, args.targets) lowerCamelCase__ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    Determine whether some subset of `arr` sums to `required_sum`,
    using bottom-up dynamic programming.
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
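# Illustrative calls (example values, not from the original file):
#   is_sum_subset([2, 4, 6, 8], 5)   -> False  (no subset sums to 5)
#   is_sum_subset([2, 4, 6, 8], 14)  -> True   (2 + 4 + 8 == 14)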
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=9 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.002 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Tuple: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = encoder_seq_length _UpperCAmelCase = decoder_seq_length # For common tests _UpperCAmelCase = self.decoder_seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = d_ff _UpperCAmelCase = relative_attention_num_buckets _UpperCAmelCase = dropout_rate _UpperCAmelCase = initializer_factor _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = decoder_start_token_id _UpperCAmelCase = None _UpperCAmelCase = decoder_layers def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return TaConfig.from_pretrained('google/umt5-base' ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[str]: """simple docstring""" if attention_mask is None: _UpperCAmelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_SCREAMING_SNAKE_CASE ) if decoder_head_mask is None: _UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_SCREAMING_SNAKE_CASE ) if cross_attn_head_mask is None: _UpperCAmelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=_SCREAMING_SNAKE_CASE ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all 
pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 ) _UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) _UpperCAmelCase = self.get_config() _UpperCAmelCase = config.num_attention_heads _UpperCAmelCase = self.prepare_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return config, input_dict def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" _UpperCAmelCase = UMTaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model( input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = result.last_hidden_state _UpperCAmelCase = result.past_key_values _UpperCAmelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_SCREAMING_SNAKE_CASE ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[int]: 
"""simple docstring""" _UpperCAmelCase = UMTaModel(config=_SCREAMING_SNAKE_CASE ).get_decoder().to(_SCREAMING_SNAKE_CASE ).eval() # first forward pass _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) ) self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) + 1 ) _UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )['last_hidden_state'] _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )['last_hidden_state'] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" _UpperCAmelCase = UMTaModel(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).half().eval() _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )['last_hidden_state'] self.parent.assertFalse(torch.isnan(_SCREAMING_SNAKE_CASE ).any().item() ) @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : Union[str, Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _a : List[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () _a : Tuple = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) _a : List[str] = True _a : List[Any] = False _a : Tuple = False _a : List[Any] = True _a : str = True # The small UMT5 model needs higher percentages for CPU/MP tests _a : Tuple = [0.8, 0.9] def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _SCREAMING_SNAKE_CASE , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_SCREAMING_SNAKE_CASE , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) 
-> int: """simple docstring""" _UpperCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions'] _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = config_and_inputs[0] _UpperCAmelCase = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() model.to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = { 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ), } for attn_name, (name, mask) in zip(_SCREAMING_SNAKE_CASE , head_masking.items() ): _UpperCAmelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _UpperCAmelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.generate( config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) # We check the state of decoder_attentions and cross_attentions just from the last step _UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class __a ( unittest.TestCase ): @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=_SCREAMING_SNAKE_CASE , legacy=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='pt' , padding=_SCREAMING_SNAKE_CASE ).input_ids # fmt: off _UpperCAmelCase = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.generate(input_ids.to(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] _UpperCAmelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # build sliding windows: `look_back` inputs predicting the next `forward_days`
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
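    # Evaluation sketch (not in the original script; added for illustration):
    # report RMSE over the test windows, in scaled units. `pred` and `y_test`
    # both have shape (n_windows, forward_days).
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"test RMSE (scaled units): {rmse:.4f}")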
'''simple docstring''' from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( 'pipelines_utils', '0.22.0', 'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.', standard_warn=False, stacklevel=3, )
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """
    Find the minimal change for `value` using the given denominations (greedy,
    largest denomination first).
    """
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # rename the lm_head weight key to the name transformers expects
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
"""simple docstring""" from __future__ import annotations from collections import deque class __A : def __init__( self , a__ ): _lowerCAmelCase : int = [] self.adlist.append( {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} ) for keyword in keywords: self.add_keyword(a__ ) self.set_fail_transitions() def __A ( self , a__ , a__ ): for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def __A ( self , a__ ): _lowerCAmelCase : List[Any] = 0 for character in keyword: _lowerCAmelCase : List[Any] = self.find_next_state(a__ , a__ ) if next_state is None: self.adlist.append( { """value""": character, """next_states""": [], """fail_state""": 0, """output""": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) _lowerCAmelCase : Optional[int] = len(self.adlist ) - 1 else: _lowerCAmelCase : Optional[Any] = next_state self.adlist[current_state]["output"].append(a__ ) def __A ( self ): _lowerCAmelCase : Union[str, Any] = deque() for node in self.adlist[0]["next_states"]: q.append(a__ ) _lowerCAmelCase : Union[str, Any] = 0 while q: _lowerCAmelCase : List[Any] = q.popleft() for child in self.adlist[r]["next_states"]: q.append(a__ ) _lowerCAmelCase : List[Any] = self.adlist[r]["""fail_state"""] while ( self.find_next_state(a__ , self.adlist[child]["""value"""] ) is None and state != 0 ): _lowerCAmelCase : Optional[Any] = self.adlist[state]["""fail_state"""] _lowerCAmelCase : Optional[Any] = self.find_next_state( a__ , self.adlist[child]["""value"""] ) if self.adlist[child]["fail_state"] is None: _lowerCAmelCase : str = 0 _lowerCAmelCase : Tuple = ( self.adlist[child]["""output"""] + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""] ) def __A ( self , a__ ): _lowerCAmelCase : Dict = {} # returns a dict with keywords and list of its occurrences _lowerCAmelCase : Tuple = 0 for i in range(len(a__ ) ): while ( self.find_next_state(a__ , string[i] ) is None and current_state != 0 ): _lowerCAmelCase : Dict = self.adlist[current_state]["""fail_state"""] _lowerCAmelCase : Optional[int] = self.find_next_state(a__ , string[i] ) if next_state is None: _lowerCAmelCase : Any = 0 else: _lowerCAmelCase : Tuple = next_state for key in self.adlist[current_state]["output"]: if key not in result: _lowerCAmelCase : Any = [] result[key].append(i - len(a__ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCAmelCase : Optional[Any] = 16 UpperCAmelCase : Optional[Any] = 32 def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' return int(x / 2**20 ) class __lowercase : """simple docstring""" def __enter__( self ) -> Optional[Any]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowerCamelCase = torch.cuda.memory_allocated() return self def __exit__( self , *A ) -> int: '''simple docstring''' gc.collect() torch.cuda.empty_cache() lowerCamelCase = torch.cuda.memory_allocated() lowerCamelCase = torch.cuda.max_memory_allocated() lowerCamelCase = bamb(self.end - self.begin ) lowerCamelCase = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def __lowerCamelCase ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : int = 16 , lowerCamelCase__ : str = "bert-base-cased" , lowerCamelCase__ : int = 320 , lowerCamelCase__ : int = 160 , ): '''simple docstring''' lowerCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ ) lowerCamelCase = load_dataset( """glue""" , """mrpc""" , split={"""train""": f'train[:{n_train}]', """validation""": f'validation[:{n_val}]'} ) def tokenize_function(lowerCamelCase__ : str ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase__ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCamelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) lowerCamelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple ): '''simple docstring''' lowerCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase = config["""lr"""] lowerCamelCase = int(config["""num_epochs"""] ) lowerCamelCase = int(config["""seed"""] ) lowerCamelCase = int(config["""batch_size"""] ) lowerCamelCase = args.model_name_or_path set_seed(lowerCamelCase__ ) lowerCamelCase , lowerCamelCase = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ , return_dict=lowerCamelCase__ ) # Instantiate optimizer lowerCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCamelCase = optimizer_cls(params=model.parameters() , lr=lowerCamelCase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCamelCase = 1 lowerCamelCase = (len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCamelCase = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=0 , num_training_steps=lowerCamelCase__ , ) else: lowerCamelCase = DummyScheduler(lowerCamelCase__ , total_num_steps=lowerCamelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # We need to keep track of how many total steps we have iterated over lowerCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly lowerCamelCase = 0 # Now we train the model lowerCamelCase = {} for epoch in range(lowerCamelCase__ , lowerCamelCase__ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(lowerCamelCase__ ): lowerCamelCase = model(**lowerCamelCase__ ) lowerCamelCase = outputs.loss lowerCamelCase = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) ) accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) ) accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) ) accelerator.print( """Total Peak Memory consumed during the train (max): {}""".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) def __lowerCamelCase ( ): '''simple docstring''' lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowerCamelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase__ , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--peak_memory_upper_bound""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , ) parser.add_argument( """--n_train""" , type=lowerCamelCase__ , default=320 , help="""Number of training examples to use.""" , ) parser.add_argument( """--n_val""" , type=lowerCamelCase__ , default=160 , help="""Number of validation examples to use.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase__ , default=1 , help="""Number of train epochs.""" , ) lowerCamelCase = parser.parse_args() lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
252
0
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCamelCase_ ( _lowerCamelCase ): create_state_space_tree(UpperCamelCase__ , [] , 0 ) def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if index == len(UpperCamelCase__ ): print(UpperCamelCase__ ) return create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , index + 1 ) current_subsequence.pop() if __name__ == "__main__": A_ : list[Any] = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["A", "B", "C"]) generate_all_subsequences(seq)
352
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int A_ : List[Any] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class a_ ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ : Optional[datasets.Features] = None def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ): import pyspark def generate_fn(): lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) lowerCamelCase__ : Dict = partition_df.collect() lowerCamelCase__ : int = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class a_ ( _BaseExamplesIterable ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ): '''simple docstring''' lowerCamelCase__ : Tuple = df lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order ) def __iter__(self ): '''simple docstring''' yield from self.generate_examples_fn() def a__ (self, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowerCamelCase_ ) return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ ) def a__ (self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ ) return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ ) @property def a__ (self ): '''simple docstring''' return len(self.partition_order ) class a_ ( datasets.DatasetBuilder ): '''simple docstring''' lowerCamelCase__ : Optional[int] = SparkConfig def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ): '''simple docstring''' import pyspark lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase__ : Optional[Any] = df lowerCamelCase__ : Dict = working_dir super().__init__( cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, ) def a__ (self ): '''simple docstring''' def create_cache_and_write_probe(lowerCamelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ ) lowerCamelCase__ : str = os.path.join(self._cache_dir, 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowerCamelCase_, 'a' ) return [probe_file] if self._spark.conf.get('spark.master', '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: lowerCamelCase__ : Tuple = ( self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(lowerCamelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def a__ (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def a__ (self, lowerCamelCase_ ): '''simple docstring''' import pyspark def get_arrow_batch_size(lowerCamelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) lowerCamelCase__ : List[Any] = self.df.count() lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase__ : List[Any] = ( self.df.limit(lowerCamelCase_ ) .repartition(1 ) .mapInArrow(lowerCamelCase_, 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) ) lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ ) def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ): '''simple docstring''' import pyspark lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath lowerCamelCase__ : Optional[int] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowerCamelCase__ : int = self.config.features lowerCamelCase__ : Dict = self._writer_batch_size lowerCamelCase__ : Optional[Any] = self._fs.storage_options def write_arrow(lowerCamelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId() lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], ) lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Any = writer_class( features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, ) lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] ) writer.write_table(lowerCamelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], ) shard_id += 1 lowerCamelCase__ : Dict = writer_class( features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, ) lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] ) writer.write_table(lowerCamelCase_ ) if writer._num_bytes > 0: lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ): lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) ) shutil.move(lowerCamelCase_, lowerCamelCase_ ) lowerCamelCase__ : List[str] = ( self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ): '''simple docstring''' self._validate_cache_dir() lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowerCamelCase_ ) lowerCamelCase__ : str = not is_remote_filesystem(self._fs ) lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN' lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ ) lowerCamelCase__ : List[str] = 0 lowerCamelCase__ : Dict = 0 lowerCamelCase__ : List[Any] = 0 lowerCamelCase__ : Optional[Any] = [] lowerCamelCase__ : List[str] = [] for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ): ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : int = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowerCamelCase_ ) lowerCamelCase__ : str = total_num_examples 
lowerCamelCase__ : int = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: lowerCamelCase__ : Union[str, Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. lowerCamelCase__ : Optional[Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ): rename( lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), ) lowerCamelCase__ : List[str] = [] lowerCamelCase__ : List[str] = 0 for i in range(len(lowerCamelCase_ ) ): lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i] for shard_id in range(lowerCamelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect() else: # don't use any pattern lowerCamelCase__ : List[Any] = 0 lowerCamelCase__ : Dict = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), ) def a__ (self, lowerCamelCase_, ): '''simple docstring''' return SparkExamplesIterable(self.df )
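A hypothetical end-to-end usage sketch for this builder, assuming a local SparkSession and going through the public datasets.Dataset.from_spark entry point, which dispatches to the Spark builder above. The DataFrame contents and session config here are made up for illustration:

from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], schema="id int, text string")

ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset
print(ds[0])  # {'id': 1, 'text': 'a'}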
316
0
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class A__ : """simple docstring""" __A : List[str] = BlenderbotConfig __A : Tuple = {} __A : Optional[int] = '''gelu''' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[int]: '''simple docstring''' a__ : Union[str, Any] = parent a__ : Dict = batch_size a__ : Union[str, Any] = seq_length a__ : List[str] = is_training a__ : List[Any] = use_labels a__ : str = vocab_size a__ : str = hidden_size a__ : List[Any] = num_hidden_layers a__ : Tuple = num_attention_heads a__ : Dict = intermediate_size a__ : Any = hidden_dropout_prob a__ : List[Any] = attention_probs_dropout_prob a__ : List[str] = max_position_embeddings a__ : List[Any] = eos_token_id a__ : Union[str, Any] = pad_token_id a__ : str = bos_token_id def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) a__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) a__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1) a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) a__ : Optional[int] = prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) return config, inputs_dict def __lowercase ( self , lowercase , lowercase) -> Dict: '''simple docstring''' a__ : int = TFBlenderbotModel(config=lowerCAmelCase__).get_decoder() a__ : Any = inputs_dict['input_ids'] a__ : List[str] = input_ids[:1, :] a__ : Any = inputs_dict['attention_mask'][:1, :] a__ : Dict = inputs_dict['head_mask'] a__ : List[str] = 1 # first forward pass a__ : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__) a__ , a__ : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size) a__ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and a__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1) a__ : Union[str, Any] = 
tf.concat([attention_mask, next_attn_mask] , axis=-1) a__ : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)[0] a__ : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice a__ : int = int(ids_tensor((1,) , output_from_past.shape[-1])) a__ : Any = output_from_no_past[:, -3:, random_slice_idx] a__ : int = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1e-3) def A_ ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> Optional[Any]: if attention_mask is None: a__ : int = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: a__ : List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: a__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: a__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: a__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" __A : Dict = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __A : str = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __A : int = ( { '''conversational''': TFBlenderbotForConditionalGeneration, '''feature-extraction''': TFBlenderbotModel, '''summarization''': TFBlenderbotForConditionalGeneration, '''text2text-generation''': TFBlenderbotForConditionalGeneration, '''translation''': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __A : List[str] = True __A : List[str] = False __A : List[str] = False def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : int = TFBlenderbotModelTester(self) a__ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__) def __lowercase ( self) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__) @require_tokenizers @require_tf class A__ ( unittest.TestCase ): """simple docstring""" __A : int = ['''My friends are cool but they eat too many carbs.'''] __A : Any = '''facebook/blenderbot-400M-distill''' @cached_property def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name) @cached_property def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model @slow def __lowercase ( self) -> Dict: '''simple docstring''' a__ : Dict = self.tokenizer(self.src_text , return_tensors='tf') a__ : List[Any] = self.model.generate( 
model_inputs.input_ids , ) a__ : Optional[int] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase__)[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
99
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCamelCase = 0 __UpperCamelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCamelCase = tuple[int, int] class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> None: snake_case_ = pos_x snake_case_ = pos_y snake_case_ = (pos_y, pos_x) snake_case_ = goal_x snake_case_ = goal_y snake_case_ = g_cost snake_case_ = parent snake_case_ = self.calculate_heuristic() snake_case_ = self.g_cost + self.h_cost def a_ ( self) -> float: snake_case_ = self.pos_x - self.goal_x snake_case_ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCAmelCase__) + abs(lowerCAmelCase__) else: return sqrt(dy**2 + dx**2) def __lt__( self, lowerCAmelCase__) -> bool: return self.f_cost < other.f_cost class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = Node(start[1], start[0], goal[1], goal[0], 0, lowerCAmelCase__) snake_case_ = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, lowerCAmelCase__) snake_case_ = [self.start] snake_case_ = [] snake_case_ = False def a_ ( self) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() snake_case_ = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(lowerCAmelCase__) self.closed_nodes.append(lowerCAmelCase__) snake_case_ = self.get_successors(lowerCAmelCase__) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCAmelCase__) else: # retrieve the best current path snake_case_ = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCAmelCase__) else: self.open_nodes.append(lowerCAmelCase__) return [self.start.pos] def a_ ( self, lowerCAmelCase__) -> list[Node]: snake_case_ = [] for action in delta: snake_case_ = parent.pos_x + action[1] snake_case_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(lowerCAmelCase__) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCAmelCase__, lowerCAmelCase__, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, lowerCAmelCase__, )) return successors def a_ ( self, lowerCAmelCase__) -> list[TPosition]: snake_case_ = node snake_case_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) snake_case_ = current_node.parent path.reverse() return path class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> None: snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = False def a_ ( self) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() snake_case_ = self.fwd_astar.open_nodes.pop(0) snake_case_ = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCAmelCase__, lowerCAmelCase__) 
self.fwd_astar.closed_nodes.append(lowerCAmelCase__) self.bwd_astar.closed_nodes.append(lowerCAmelCase__) snake_case_ = current_bwd_node snake_case_ = current_fwd_node snake_case_ = { self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__), self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCAmelCase__) else: # retrieve the best current path snake_case_ = astar.open_nodes.pop( astar.open_nodes.index(lowerCAmelCase__)) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCAmelCase__) else: astar.open_nodes.append(lowerCAmelCase__) return [self.fwd_astar.start.pos] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> list[TPosition]: snake_case_ = self.fwd_astar.retrace_path(lowerCAmelCase__) snake_case_ = self.bwd_astar.retrace_path(lowerCAmelCase__) bwd_path.pop() bwd_path.reverse() snake_case_ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCamelCase = (0, 0) __UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCamelCase = time.time() __UpperCamelCase = AStar(init, goal) __UpperCamelCase = a_star.search() __UpperCamelCase = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") __UpperCamelCase = time.time() __UpperCamelCase = BidirectionalAStar(init, goal) __UpperCamelCase = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
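For reference, a standalone sketch of the two heuristics toggled by the HEURISTIC flag above (1 selects Manhattan distance, 0 Euclidean):

from math import sqrt


def manhattan(dx: int, dy: int) -> float:
    # Exact per-step cost on a 4-connected grid, hence admissible.
    return abs(dx) + abs(dy)


def euclidean(dx: int, dy: int) -> float:
    # Straight-line distance: an admissible lower bound on any grid path.
    return sqrt(dx * dx + dy * dy)


assert manhattan(3, 4) == 7
assert euclidean(3, 4) == 5.0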
69
0
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor lowerCamelCase = random.Random() def lowerCamelCase_ ( _a , _a=1.0 , _a=None , _a=None ): """simple docstring""" if rng is None: lowerCAmelCase__ : List[Any] = global_rng lowerCAmelCase__ : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _a ( unittest.TestCase): def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str=7 , _SCREAMING_SNAKE_CASE : Optional[Any]=400 , _SCREAMING_SNAKE_CASE : int=2000 , _SCREAMING_SNAKE_CASE : Optional[Any]=24 , _SCREAMING_SNAKE_CASE : Optional[int]=24 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , _SCREAMING_SNAKE_CASE : str=1_6000 , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : Tuple=True , )-> Optional[Any]: lowerCAmelCase__ : Tuple = parent lowerCAmelCase__ : int = batch_size lowerCAmelCase__ : int = min_seq_length lowerCAmelCase__ : Optional[int] = max_seq_length lowerCAmelCase__ : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCAmelCase__ : List[Any] = feature_size lowerCAmelCase__ : int = num_mel_bins lowerCAmelCase__ : Union[str, Any] = padding_value lowerCAmelCase__ : str = sampling_rate lowerCAmelCase__ : Any = return_attention_mask lowerCAmelCase__ : Dict = do_normalize def UpperCAmelCase__( self : Dict )-> int: return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=False , _SCREAMING_SNAKE_CASE : Dict=False )-> Any: def _flatten(_SCREAMING_SNAKE_CASE : List[Any] ): return list(itertools.chain(*_SCREAMING_SNAKE_CASE ) ) if equal_length: lowerCAmelCase__ : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCAmelCase__ : Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase__ : List[str] = [np.asarray(_SCREAMING_SNAKE_CASE ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _a ( _lowercase , unittest.TestCase): _a : Tuple = SpeechaTextFeatureExtractor if is_speech_available() else None def UpperCAmelCase__( self : Union[str, Any] )-> Any: lowerCAmelCase__ : str = SpeechaTextFeatureExtractionTester(self ) def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : str )-> Optional[int]: self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1E-3 ) ) def UpperCAmelCase__( self : Optional[int] )-> int: # Tests that all call wrap to encode_plus and batch_encode_plus lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCAmelCase__ : Optional[int] 
= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCAmelCase__ : Optional[int] = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] # Test feature size lowerCAmelCase__ : Dict = feature_extractor(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCAmelCase__ : str = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features lowerCAmelCase__ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # Test batched lowerCAmelCase__ : int = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features lowerCAmelCase__ : Union[str, Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowerCAmelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowerCAmelCase__ : str = np.asarray(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Tuple = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features lowerCAmelCase__ : List[Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) def UpperCAmelCase__( self : Any )-> Union[str, Any]: lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCAmelCase__ : Dict = ['''longest''', '''max_length''', '''do_not_pad'''] lowerCAmelCase__ : Any = [None, 16, None] for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ : Union[str, Any] = feature_extractor( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : int = inputs.input_features lowerCAmelCase__ : Optional[int] = inputs.attention_mask lowerCAmelCase__ : str = [np.sum(_SCREAMING_SNAKE_CASE ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCAmelCase__( self : List[Any] )-> int: lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCAmelCase__ : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad'''] lowerCAmelCase__ : Optional[int] = [None, 16, None] for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ : str = feature_extractor( _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=_SCREAMING_SNAKE_CASE ) 
lowerCAmelCase__ : Union[str, Any] = inputs.input_features lowerCAmelCase__ : Optional[int] = inputs.attention_mask lowerCAmelCase__ : str = [np.sum(_SCREAMING_SNAKE_CASE ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCAmelCase__( self : str )-> Optional[int]: lowerCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCAmelCase__ : Optional[Any] = feature_extractor( _SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Union[str, Any] = inputs.input_features lowerCAmelCase__ : Any = inputs.attention_mask lowerCAmelCase__ : str = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def UpperCAmelCase__( self : Dict )-> Optional[int]: lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCAmelCase__ : Tuple = feature_extractor( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Optional[int] = inputs.input_features lowerCAmelCase__ : Optional[Any] = inputs.attention_mask lowerCAmelCase__ : Tuple = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) lowerCAmelCase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCAmelCase__ : Optional[int] = feature_extractor( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=16 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Any = inputs.input_features lowerCAmelCase__ : int = inputs.attention_mask lowerCAmelCase__ : List[Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def UpperCAmelCase__( self : str )-> Union[str, Any]: import torch lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) lowerCAmelCase__ : Any = 
np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCAmelCase__ : List[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowerCAmelCase__ : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Tuple )-> List[str]: from datasets import load_dataset lowerCAmelCase__ : str = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech lowerCAmelCase__ : List[str] = ds.sort('''id''' ).select(range(_SCREAMING_SNAKE_CASE ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCAmelCase__( self : Optional[int] )-> int: # fmt: off lowerCAmelCase__ : Optional[Any] = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on lowerCAmelCase__ : str = self._load_datasamples(1 ) lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ : int = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
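The _check_zero_mean_unit_variance assertions above test utterance-level normalization. A minimal sketch of that normalization, assuming per-feature statistics taken over the time axis:

import numpy as np


def normalize(features: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    """Zero-mean, unit-variance normalization per feature dimension."""
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + eps)


x = np.random.rand(100, 24).astype(np.float32)
y = normalize(x)
assert np.all(np.abs(y.mean(axis=0)) < 1e-3)
assert np.all(np.abs(y.var(axis=0) - 1) < 1e-2)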
370
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
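bin_exp_mod is imported from a sibling module that is not reproduced here; a minimal sketch of binary modular exponentiation with the same (base, exponent, modulus) signature:

def bin_exp_mod(a, n, mod):
    """Compute (a ** n) % mod by repeated squaring, in O(log n) multiplications."""
    result = 1
    a %= mod
    while n > 0:
        if n % 2 == 1:  # lowest bit set: fold the current power of a into the result
            result = result * a % mod
        a = a * a % mod  # square the base for the next bit
        n //= 2
    return result


assert bin_exp_mod(3, 5, 7) == pow(3, 5, 7)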
211
0
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets __UpperCamelCase : Dict = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' __UpperCamelCase : int = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' __UpperCamelCase : Any = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. 
`"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowercase__ ( datasets.Metric): def __A ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def __A ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=False ): '''simple docstring''' if rouge_types is None: SCREAMING_SNAKE_CASE : Dict = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] SCREAMING_SNAKE_CASE : Tuple = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ ) if use_aggregator: SCREAMING_SNAKE_CASE : Optional[Any] = scoring.BootstrapAggregator() else: SCREAMING_SNAKE_CASE : int = [] for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): SCREAMING_SNAKE_CASE : List[str] = scorer.score(UpperCamelCase__ , UpperCamelCase__ ) if use_aggregator: aggregator.add_scores(UpperCamelCase__ ) else: scores.append(UpperCamelCase__ ) if use_aggregator: SCREAMING_SNAKE_CASE : int = aggregator.aggregate() else: SCREAMING_SNAKE_CASE : str = {} for key in scores[0]: SCREAMING_SNAKE_CASE : Dict = [score[key] for score in scores] return result
182
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params __UpperCamelCase : Any = getLogger(__name__) __UpperCamelCase : int = 'cuda' if torch.cuda.is_available() else 'cpu' def A ( _lowercase , _lowercase , _lowercase , _lowercase = 8 , _lowercase = DEFAULT_DEVICE , _lowercase=False , _lowercase="summarization" , _lowercase=None , **_lowercase , ): SCREAMING_SNAKE_CASE : List[str] = Path(_lowercase ).open('''w''' , encoding='''utf-8''' ) SCREAMING_SNAKE_CASE : int = str(_lowercase ) SCREAMING_SNAKE_CASE : Any = AutoModelForSeqaSeqLM.from_pretrained(_lowercase ).to(_lowercase ) if fpaa: SCREAMING_SNAKE_CASE : Dict = model.half() SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(_lowercase ) logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. SCREAMING_SNAKE_CASE : str = time.time() # update config with task specific params use_task_specific_params(_lowercase , _lowercase ) if prefix is None: SCREAMING_SNAKE_CASE : Optional[int] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(_lowercase , _lowercase ) ) ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prefix + text for text in examples_chunk] SCREAMING_SNAKE_CASE : Dict = tokenizer(_lowercase , return_tensors='''pt''' , truncation=_lowercase , padding='''longest''' ).to(_lowercase ) SCREAMING_SNAKE_CASE : str = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowercase , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() SCREAMING_SNAKE_CASE : Tuple = int(time.time() - start_time ) # seconds SCREAMING_SNAKE_CASE : str = len(_lowercase ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A ( ): return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def A ( _lowercase=True ): SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=_lowercase , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=_lowercase , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=_lowercase , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=_lowercase , required=_lowercase , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=_lowercase , required=_lowercase , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=_lowercase , required=_lowercase , default=_lowercase , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=_lowercase , required=_lowercase , default=_lowercase , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=_lowercase , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=_lowercase , default=8 , required=_lowercase , help='''batch size''' ) 
parser.add_argument( '''--n_obs''' , type=_lowercase , default=-1 , required=_lowercase , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=_lowercase , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_known_args() SCREAMING_SNAKE_CASE : Optional[Any] = parse_numeric_n_bool_cl_kwargs(_lowercase ) if parsed_args and verbose: print(f"""parsed the following generate kwargs: {parsed_args}""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: SCREAMING_SNAKE_CASE : Any = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=_lowercase ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) SCREAMING_SNAKE_CASE : List[str] = generate_summaries_or_translations( _lowercase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowercase , ) if args.reference_path is None: return {} # Compute scores SCREAMING_SNAKE_CASE : Dict = calculate_bleu if '''translation''' in args.task else calculate_rouge SCREAMING_SNAKE_CASE : Union[str, Any] = [x.rstrip() for x in open(args.save_path ).readlines()] SCREAMING_SNAKE_CASE : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowercase )] SCREAMING_SNAKE_CASE : dict = score_fn(_lowercase , _lowercase ) scores.update(_lowercase ) if args.dump_args: scores.update(_lowercase ) if args.info: SCREAMING_SNAKE_CASE : Tuple = args.info if verbose: print(_lowercase ) if args.score_path is not None: json.dump(_lowercase , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
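The script above iterates over chunks(examples, batch_size) from a local utils module that is not shown; a minimal sketch of such a fixed-size batching helper:

from typing import Iterator, List, TypeVar

T = TypeVar("T")


def chunks(lst: List[T], n: int) -> Iterator[List[T]]:
    """Yield successive n-sized slices of lst (the last slice may be shorter)."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]


assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]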
182
1
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
257
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
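shift_tokens_right comes from the T5 Flax modeling file; a NumPy sketch of its semantics (prepend the decoder start token, shift everything right by one, and map label padding sentinels to the pad token):

import numpy as np


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]   # shift right, dropping the final position
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)  # -100 marks ignored label positions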
257
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , ) -> List[Any]: UpperCamelCase__ : int = {} if train_file is not None: UpperCamelCase__ : str = [train_file] if eval_file is not None: UpperCamelCase__ : Any = [eval_file] if test_file is not None: UpperCamelCase__ : Dict = [test_file] UpperCamelCase__ : str = datasets.load_dataset("csv" , data_files=__lowerCAmelCase ) UpperCamelCase__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() ) UpperCamelCase__ : List[Any] = features_name.pop(__lowerCAmelCase ) UpperCamelCase__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) UpperCamelCase__ : List[Any] = {label: i for i, label in enumerate(__lowerCAmelCase )} UpperCamelCase__ : str = tokenizer.model_input_names UpperCamelCase__ : Union[str, Any] = {} if len(__lowerCAmelCase ) == 1: for k in files.keys(): UpperCamelCase__ : Optional[Any] = ds[k].map( lambda __lowerCAmelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" ) , batched=__lowerCAmelCase , ) elif len(__lowerCAmelCase ) == 2: for k in files.keys(): UpperCamelCase__ : Any = ds[k].map( lambda __lowerCAmelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , ) , batched=__lowerCAmelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: UpperCamelCase__ : Tuple = {k: v for k, v in ex.items() if k in input_names} UpperCamelCase__ : int = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: UpperCamelCase__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} UpperCamelCase__ : str = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: UpperCamelCase__ : Dict = {k: v for k, v in ex.items() if k in input_names} UpperCamelCase__ : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) UpperCamelCase__ : Optional[Any] = ( tf.data.Dataset.from_generator( __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: UpperCamelCase__ : Union[str, Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) UpperCamelCase__ : Optional[Any] = ( tf.data.Dataset.from_generator( __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: UpperCamelCase__ : Tuple = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) UpperCamelCase__ : int = ( tf.data.Dataset.from_generator( __lowerCAmelCase , ({k: 
tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: UpperCamelCase__ : Optional[int] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCamelCase : int =logging.getLogger(__name__) @dataclass class __a : _lowerCAmelCase : int = field(metadata={'''help''': '''Which column contains the label'''} ) _lowerCAmelCase : str = field(default=A__ , metadata={'''help''': '''The path of the training file'''} ) _lowerCAmelCase : Optional[str] = field(default=A__ , metadata={'''help''': '''The path of the development file'''} ) _lowerCAmelCase : Optional[str] = field(default=A__ , metadata={'''help''': '''The path of the test file'''} ) _lowerCAmelCase : int = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _lowerCAmelCase : bool = field( default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class __a : _lowerCAmelCase : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _lowerCAmelCase : Optional[str] = field( default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowerCAmelCase : Optional[str] = field( default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _lowerCAmelCase : bool = field(default=A__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowerCAmelCase : Optional[str] = field( default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def SCREAMING_SNAKE_CASE ( ) -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) UpperCamelCase__ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): UpperCamelCase__ : List[str] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(__lowerCAmelCase ) -> Dict: UpperCamelCase__ : List[str] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer UpperCamelCase__ : List[Any] = TFTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCamelCase__ : Dict = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCamelCase__ : Optional[int] = trainer.evaluate() UpperCamelCase__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__lowerCAmelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(__lowerCAmelCase ) return results if __name__ == "__main__": main()
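# --- Illustration (not part of the original script) ----------------------
# A minimal sketch of the accuracy metric computed by `compute_metrics`
# above, on made-up logits/labels; only numpy is required.
import numpy as np

toy_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # (batch, num_labels)
toy_labels = np.array([1, 0, 0])
preds = np.argmax(toy_logits, axis=1)  # same reduction as compute_metrics
print({"acc": float((preds == toy_labels).mean())})  # {'acc': 0.666...}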
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase : Optional[Any] =logging.get_logger(__name__) lowerCamelCase : Optional[int] ={ '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class __a ( A__ ): _lowerCAmelCase : Optional[int] = '''owlvit_text_model''' def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any]=4_94_08 , SCREAMING_SNAKE_CASE : List[str]=5_12 , SCREAMING_SNAKE_CASE : List[Any]=20_48 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=8 , SCREAMING_SNAKE_CASE : Dict=16 , SCREAMING_SNAKE_CASE : Union[str, Any]="quick_gelu" , SCREAMING_SNAKE_CASE : List[str]=1e-5 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE : Any=0.0_2 , SCREAMING_SNAKE_CASE : int=1.0 , SCREAMING_SNAKE_CASE : Any=0 , SCREAMING_SNAKE_CASE : int=4_94_06 , SCREAMING_SNAKE_CASE : List[str]=4_94_07 , **SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = vocab_size UpperCamelCase__ : int = hidden_size UpperCamelCase__ : List[str] = intermediate_size UpperCamelCase__ : Tuple = num_hidden_layers UpperCamelCase__ : str = num_attention_heads UpperCamelCase__ : Any = max_position_embeddings UpperCamelCase__ : List[Any] = hidden_act UpperCamelCase__ : str = layer_norm_eps UpperCamelCase__ : List[Any] = attention_dropout UpperCamelCase__ : Tuple = initializer_range UpperCamelCase__ : Optional[Any] = initializer_factor @classmethod def __lowercase ( cls : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) UpperCamelCase__ , UpperCamelCase__ : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": UpperCamelCase__ : Dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class __a ( A__ ): _lowerCAmelCase : str = '''owlvit_vision_model''' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : str=7_68 , SCREAMING_SNAKE_CASE : Dict=30_72 , SCREAMING_SNAKE_CASE : int=12 , SCREAMING_SNAKE_CASE : Union[str, Any]=12 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=7_68 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : Dict="quick_gelu" , SCREAMING_SNAKE_CASE : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Dict=0.0_2 , SCREAMING_SNAKE_CASE : Optional[int]=1.0 , **SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = hidden_size UpperCamelCase__ : str = intermediate_size UpperCamelCase__ : Any = num_hidden_layers UpperCamelCase__ : str = num_attention_heads UpperCamelCase__ : int = num_channels UpperCamelCase__ : Union[str, Any] = image_size UpperCamelCase__ : List[Any] = patch_size UpperCamelCase__ : Tuple = hidden_act UpperCamelCase__ : Optional[int] = layer_norm_eps UpperCamelCase__ : Optional[Any] = attention_dropout UpperCamelCase__ : Dict = initializer_range UpperCamelCase__ : int = initializer_factor @classmethod def __lowercase ( cls : Dict , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) UpperCamelCase__ , UpperCamelCase__ : List[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": UpperCamelCase__ : List[str] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class __a ( A__ ): _lowerCAmelCase : str = '''owlvit''' _lowerCAmelCase : Tuple = True def __init__( self : Any , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : str=5_12 , SCREAMING_SNAKE_CASE : Any=2.6_5_9_2 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , **SCREAMING_SNAKE_CASE : List[Any] , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) if text_config is None: UpperCamelCase__ : str = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: UpperCamelCase__ : List[str] = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." 
) UpperCamelCase__ : Dict = OwlViTTextConfig(**SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = OwlViTVisionConfig(**SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = projection_dim UpperCamelCase__ : Union[str, Any] = logit_scale_init_value UpperCamelCase__ : int = return_dict UpperCamelCase__ : Tuple = 1.0 @classmethod def __lowercase ( cls : Any , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) UpperCamelCase__ , UpperCamelCase__ : Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @classmethod def __lowercase ( cls : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' UpperCamelCase__ : List[Any] = {} UpperCamelCase__ : Union[str, Any] = text_config UpperCamelCase__ : Optional[int] = vision_config return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowercase ( self : Tuple ): '''simple docstring''' UpperCamelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCamelCase__ : Union[str, Any] = self.text_config.to_dict() UpperCamelCase__ : List[str] = self.vision_config.to_dict() UpperCamelCase__ : Optional[int] = self.__class__.model_type return output class __a ( A__ ): @property def __lowercase ( self : Any ): '''simple docstring''' return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def __lowercase ( self : Optional[int] ): '''simple docstring''' return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def __lowercase ( self : Dict ): '''simple docstring''' return 1e-4 def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : "ProcessorMixin" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ): '''simple docstring''' UpperCamelCase__ : Optional[int] = super().generate_dummy_inputs( processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[str] = super().generate_dummy_inputs( processor.image_processor , batch_size=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) return {**text_input_dict, **image_input_dict} @property def __lowercase ( self : Tuple ): '''simple docstring''' return 14
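# --- Illustration (assumption: the classes above correspond to
# transformers' OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig).
# The from_text_vision_configs classmethod defined above takes plain dicts:
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_cfg = OwlViTTextConfig()    # defaults mirror the __init__ above
vision_cfg = OwlViTVisionConfig()
config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
print(config.projection_dim)     # 512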
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
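# --- Illustration (not part of the original test) -------------------------
# Shape sanity check in plain numpy terms (illustrative array): the model
# output is (batch, seq_len, hidden) and the test compares only the last
# hidden dimension, i.e. a (1, 12) slice.
import numpy as np

out = np.zeros((1, 12, 768))
assert out[:, :, -1].shape == (1, 12)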
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex with an id, a key for Prim's algorithm, a parent
    pointer (pi), and an adjacency structure."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm using a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap; re-heapifies after each
    key decrease."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
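# --- Example usage (assumes the Vertex/connect/prim/prim_heap definitions
# above; the tiny triangle graph is illustrative) --------------------------
graph = [Vertex(x) for x in range(3)]    # ids "0", "1", "2"
connect(graph, 1, 2, 1)                  # 1-based endpoints, weight 1
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 4)
print(prim(graph, graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(graph, graph[0])))  # same MST, heap-based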
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) __snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } __snake_case = { """b0""": { """hidden_dim""": 12_80, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_24, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 12_80, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_40, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 14_08, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_60, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 15_36, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_00, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 17_92, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_80, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 20_48, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_56, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 23_04, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_28, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 25_60, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_00, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def _A ( SCREAMING_SNAKE_CASE__ : List[str] ): UpperCamelCase :int = EfficientNetConfig() UpperCamelCase :Optional[Any] = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCamelCase :Dict = CONFIG_MAP[model_name]['''width_coef'''] UpperCamelCase :int = CONFIG_MAP[model_name]['''depth_coef'''] UpperCamelCase :Optional[int] = CONFIG_MAP[model_name]['''image_size'''] UpperCamelCase :Optional[int] = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCamelCase :int = CONFIG_MAP[model_name]['''dw_padding'''] UpperCamelCase :str = '''huggingface/label-files''' UpperCamelCase :Optional[int] = '''imagenet-1k-id2label.json''' UpperCamelCase :List[Any] = 1000 UpperCamelCase :Optional[int] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase :str = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} UpperCamelCase :Dict = idalabel UpperCamelCase :int = {v: k for k, v in idalabel.items()} return config def _A ( ): UpperCamelCase :Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCamelCase :Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im def _A ( SCREAMING_SNAKE_CASE__ : Dict ): UpperCamelCase :str = CONFIG_MAP[model_name]['''image_size'''] UpperCamelCase :List[Any] = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , 
image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=SCREAMING_SNAKE_CASE__ , ) return preprocessor def _A ( SCREAMING_SNAKE_CASE__ : Any ): UpperCamelCase :Union[str, Any] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCamelCase :Dict = sorted(set(SCREAMING_SNAKE_CASE__ ) ) UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :List[Any] = {b: str(SCREAMING_SNAKE_CASE__ ) for b, i in zip(SCREAMING_SNAKE_CASE__ , range(SCREAMING_SNAKE_CASE__ ) )} UpperCamelCase :List[Any] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCamelCase :str = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', 
'''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCamelCase :Union[str, Any] = {} for item in rename_keys: if item[0] in original_param_names: UpperCamelCase :Optional[int] = '''efficientnet.''' + item[1] UpperCamelCase :List[str] = '''classifier.weight''' UpperCamelCase :Dict = '''classifier.bias''' return key_mapping def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCamelCase :List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCamelCase :Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCamelCase :str = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE__ ) ) else: UpperCamelCase :int = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ): UpperCamelCase :Any = model_classes[model_name]( include_top=SCREAMING_SNAKE_CASE__ , weights='''imagenet''' , input_tensor=SCREAMING_SNAKE_CASE__ , input_shape=SCREAMING_SNAKE_CASE__ , pooling=SCREAMING_SNAKE_CASE__ , classes=1000 , classifier_activation='''softmax''' , ) UpperCamelCase :Dict = original_model.trainable_variables UpperCamelCase :List[Any] = original_model.non_trainable_variables UpperCamelCase :Tuple = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCamelCase :Union[str, Any] = param.numpy() UpperCamelCase :Union[str, Any] = list(tf_params.keys() ) # Load HuggingFace model UpperCamelCase :List[str] = get_efficientnet_config(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :List[str] = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() UpperCamelCase :Union[str, Any] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCamelCase :Optional[int] = rename_keys(SCREAMING_SNAKE_CASE__ ) replace_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Initialize preprocessor and preprocess input image UpperCamelCase :Tuple = convert_image_processor(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :List[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCamelCase :List[str] = hf_model(**SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Union[str, Any] = outputs.logits.detach().numpy() # Original model inference UpperCamelCase :Optional[int] = False UpperCamelCase :Optional[Any] = CONFIG_MAP[model_name]['''image_size'''] UpperCamelCase :Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCamelCase :Optional[int] = image.img_to_array(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Union[str, Any] = np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=0 ) UpperCamelCase :Any = original_model.predict(SCREAMING_SNAKE_CASE__ ) # Check whether 
original and HF model outputs match -> np.allclose assert np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): os.mkdir(SCREAMING_SNAKE_CASE__ ) # Save converted model and image processor hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) preprocessor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) UpperCamelCase :Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(SCREAMING_SNAKE_CASE__ ) hf_model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") __snake_case = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
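# --- Illustration (not part of the original script) ------------------------
# Kernel-layout sketch of the conversion above, with illustrative shapes:
# TF stores conv kernels as (H, W, C_in, C_out); PyTorch expects
# (C_out, C_in, H, W). Depthwise kernels use permute(2, 3, 0, 1) instead.
import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32).astype("float32")   # H, W, C_in, C_out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # C_out, C_in, H, W
assert pt_kernel.shape == (32, 16, 3, 3)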
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Recursively build the state-space tree: at every index, first skip
    the element, then include it in the current subsequence."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
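# --- Tiny demonstration (uses the definitions above): for [1, 2] the
# skip-then-take recursion prints the four subsequences in this order:
generate_all_subsequences([1, 2])
# []
# [2]
# [1]
# [1, 2]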
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
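# --- Illustration (not part of the original script) ------------------------
# A minimal, tokenizer-free illustration of the greedy packing rule used in
# pack_examples, with whitespace token counts standing in for real
# tokenizer lengths:
def pack(lines, max_tokens=8):
    packed, cur = [], lines[0]
    for nxt in lines[1:]:
        cand = cur + " " + nxt
        if len(cand.split()) > max_tokens:  # cant fit, finalize
            packed.append(cur)
            cur = nxt
        else:  # can fit, keep adding
            cur = cand
    packed.append(cur)
    return packed


print(pack(["a b c", "d e", "f g h i", "j"]))
# ['a b c d e', 'f g h i j']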
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __lowercase (unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'shortest_edge': 1_8} SCREAMING_SNAKE_CASE_ : Optional[Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} SCREAMING_SNAKE_CASE_ : int = parent SCREAMING_SNAKE_CASE_ : str = batch_size SCREAMING_SNAKE_CASE_ : str = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = image_size SCREAMING_SNAKE_CASE_ : str = min_resolution SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_resolution SCREAMING_SNAKE_CASE_ : int = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Optional[int] = do_center_crop SCREAMING_SNAKE_CASE_ : Any = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : List[str] = image_mean SCREAMING_SNAKE_CASE_ : Optional[int] = image_std def UpperCamelCase__ ( self ): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = LevitImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , 'image_mean' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , 'image_std' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , 'do_center_crop' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 1_8} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE_ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ : str = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" a_ =None a_ =BloomTokenizerFast a_ =BloomTokenizerFast a_ =True a_ =False a_ ="""tokenizer_file""" a_ ={"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""} def _lowercase ( self : List[Any] ) -> int: super().setUp() __lowerCamelCase : List[Any] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] , **_a : Optional[Any] ) -> Tuple: kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def _lowercase ( self : Dict ) -> int: __lowerCamelCase : int = self.get_rust_tokenizer() __lowerCamelCase : Any = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] __lowerCamelCase : Union[str, Any] = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] __lowerCamelCase : Union[str, Any] = tokenizer.batch_encode_plus(_a )['input_ids'] self.assertListEqual(_a , _a ) __lowerCamelCase : List[str] = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _lowercase ( self : Optional[Any] , _a : Union[str, Any]=6 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_a , **_a ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __lowerCamelCase : List[Any] = 'This is a simple input' __lowerCamelCase : str = ['This is a simple input 1', 'This is a simple input 2'] __lowerCamelCase : str = ('This is a simple input', 'This is a pair') __lowerCamelCase : Optional[int] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(_a , max_length=_a ) tokenizer_r.encode_plus(_a , max_length=_a ) tokenizer_r.batch_encode_plus(_a , max_length=_a ) tokenizer_r.encode(_a , max_length=_a ) tokenizer_r.batch_encode_plus(_a , max_length=_a ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) __lowerCamelCase : Optional[Any] = None # Hotfixing padding = None self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='max_length' ) # Simple input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='max_length' ) # Simple input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='max_length' , ) # Pair input self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='max_length' ) # Pair input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='max_length' ) # Pair input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='max_length' , ) def _lowercase ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase : Any = self.get_rust_tokenizer() __lowerCamelCase : List[Any] = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_a ) __lowerCamelCase : Union[str, Any] = next(iter(_a ) )['premise'] # pick up one data __lowerCamelCase : Any = list(sample_data.values() ) 
__lowerCamelCase : Optional[int] = list(map(tokenizer.encode , _a ) ) __lowerCamelCase : Optional[Any] = [tokenizer.decode(_a , clean_up_tokenization_spaces=_a ) for x in output_tokens] self.assertListEqual(_a , _a ) def _lowercase ( self : Dict ) -> Optional[int]: # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
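# --- Illustration (not part of the original test) --------------------------
# Round-trip sketch of the batch encode/decode pattern exercised above
# (downloads the BLOOM tokenizer from the Hub on first use).
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok.batch_encode_plus(["The quick brown fox</s>"])["input_ids"]
print(tok.batch_decode(ids))  # ['The quick brown fox</s>']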
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
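# --- Call-pattern sketch (hypothetical `scale` kwarg and version string;
# `deprecate` reads the package `__version__` through the relative import
# above, so this only runs from inside the diffusers package) ---------------
def resize(image, size=None, **kwargs):
    scale = deprecate(
        ("scale", "1.0.0", "Pass `size` instead of `scale`."),
        take_from=kwargs,
    )
    if scale is not None:  # caller used the deprecated kwarg
        size = scale
    ...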
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
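# --- Example (uses the FFT class above): multiplying (1 + 2x) by (3 + 4x)
# gives 3 + 10x + 8x^2; coefficients come back as rounded complex numbers.
fft = FFT([1, 2], [3, 4])
print(fft.product)  # [(3+0j), (10+0j), (8+0j)]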
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Any = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowerCAmelCase : str = { """b0""": { """hidden_dim""": 1280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = EfficientNetConfig() SCREAMING_SNAKE_CASE_: Any = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE_: Optional[Any] = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE_: List[Any] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE_: Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE_: Optional[int] = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE_: Dict = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE_: str = "huggingface/label-files" SCREAMING_SNAKE_CASE_: str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE_: int = 10_00 SCREAMING_SNAKE_CASE_: int = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE_: int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_: Any = idalabel SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in idalabel.items()} return config def A_ ( ): SCREAMING_SNAKE_CASE_: Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE_: int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE_: Optional[Any] = EfficientNetImageProcessor( size={"height": size, "width": 
size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_UpperCAmelCase , ) return preprocessor def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] SCREAMING_SNAKE_CASE_: Optional[Any] = sorted(set(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: int = len(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] = {b: str(_UpperCAmelCase ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )} SCREAMING_SNAKE_CASE_: List[Any] = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: SCREAMING_SNAKE_CASE_: List[str] = block_name_mapping[b] rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) 
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) SCREAMING_SNAKE_CASE_: Optional[Any] = {} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE_: str = "efficientnet." + item[1] SCREAMING_SNAKE_CASE_: List[str] = "classifier.weight" SCREAMING_SNAKE_CASE_: Optional[Any] = "classifier.bias" return key_mapping def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE_: List[str] = key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE_: str = torch.from_numpy(_UpperCAmelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE_: int = torch.from_numpy(_UpperCAmelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE_: Tuple = torch.from_numpy(np.transpose(_UpperCAmelCase ) ) else: SCREAMING_SNAKE_CASE_: List[str] = torch.from_numpy(_UpperCAmelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCAmelCase ) @torch.no_grad() def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = model_classes[model_name]( include_top=_UpperCAmelCase , weights="imagenet" , input_tensor=_UpperCAmelCase , input_shape=_UpperCAmelCase , pooling=_UpperCAmelCase , classes=10_00 , classifier_activation="softmax" , ) SCREAMING_SNAKE_CASE_: Tuple = original_model.trainable_variables SCREAMING_SNAKE_CASE_: Dict = original_model.non_trainable_variables SCREAMING_SNAKE_CASE_: List[Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE_: str = param.numpy() SCREAMING_SNAKE_CASE_: Union[str, Any] = list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE_: Any = get_efficientnet_config(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = EfficientNetForImageClassification(_UpperCAmelCase ).eval() SCREAMING_SNAKE_CASE_: str = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) SCREAMING_SNAKE_CASE_: Tuple = rename_keys(_UpperCAmelCase ) replace_params(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE_: Optional[Any] = convert_image_processor(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_: Union[str, Any] = hf_model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE_: Dict = False SCREAMING_SNAKE_CASE_: Optional[int] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE_: int = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE_: Tuple = image.img_to_array(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = np.expand_dims(_UpperCAmelCase , axis=0 ) SCREAMING_SNAKE_CASE_: str = original_model.predict(_UpperCAmelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(_UpperCAmelCase ): os.mkdir(_UpperCAmelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCAmelCase ) preprocessor.save_pretrained(_UpperCAmelCase ) if push_to_hub: # Push model and image processor to hub print(f"Pushing converted {model_name} to the hub..." ) SCREAMING_SNAKE_CASE_: Optional[Any] = f"efficientnet-{model_name}" preprocessor.push_to_hub(_UpperCAmelCase ) hf_model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowerCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
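# A minimal invocation sketch for the conversion script above. The script file name
# here is an assumption; the flags come straight from the argparse definitions:
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model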
127
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


UpperCAmelCase__ = logging.get_logger(__name__)

UpperCAmelCase__ = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class lowerCamelCase__ ( lowerCAmelCase):
    SCREAMING_SNAKE_CASE__ = '''vit'''

    def __init__(self , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=2_2_4 , UpperCAmelCase=1_6 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=1_6 , **UpperCAmelCase , ) -> List[str]:
        super().__init__(**UpperCAmelCase )
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =initializer_range
        _lowercase =layer_norm_eps
        _lowercase =image_size
        _lowercase =patch_size
        _lowercase =num_channels
        _lowercase =qkv_bias
        _lowercase =encoder_stride


class lowerCamelCase__ ( lowerCAmelCase):
    SCREAMING_SNAKE_CASE__ = version.parse('''1.11''')

    @property
    def __A (self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def __A (self ) -> float:
        return 1e-4
5
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits = 3 ):
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )

    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )

    quantum_circuit = QuantumCircuit(qr , cr )

    counter = number_of_qubits

    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )

    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10000 )

    return job.result().get_counts(quantum_circuit )


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
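# Sanity-check sketch for the QFT above (assumes qiskit with the Aer simulator is
# installed): the all-zero input state maps to an equal superposition, so the
# 10000-shot counts should be spread roughly uniformly over the 2**3 = 8 outcomes.
#
#   counts = quantum_fourier_transform(3)
#   assert sum(counts.values()) == 10000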
278
0
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''


_KWARGS_DESCRIPTION = '''
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results[\'pearsonr\'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        [\'p-value\', \'pearsonr\']
        >>> print(round(results[\'pearsonr\'], 2))
        -0.74
        >>> print(round(results[\'p-value\'], 2))
        0.15
'''


_CITATION = '''
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
           Haberland, Matt and Reddy, Tyler and Cournapeau, David and
           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
           Kern, Robert and Larson, Eric and Carey, C J and
           Polat, Ilhan and Feng, Yu and Moore, Eric W. and
           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
           Harris, Charles R. and Archibald, Anne M. and
           Ribeiro, Antonio H. and Pedregosa, Fabian and
           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float" ),
                    "references": datasets.Value("float" ),
                } ) ,
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] ,
        )

    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
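# Usage sketch mirroring the docstring examples above (requires the scipy and
# datasets packages):
#
#   metric = datasets.load_metric("pearsonr")
#   metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
#   # -> {'pearsonr': -0.74}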
366
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False): try: lowercase__ : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : Union[str, Any] = default else: # KEY is set, convert it to True or False. try: lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def lowercase_ ( _lowerCamelCase : int): return unittest.skip("Test was skipped")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Dict): return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
List[Any]=None , _lowerCamelCase : Dict=None): if test_case is None: return partial(_lowerCamelCase , version=_lowerCamelCase) return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase) UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase) class snake_case_ ( unittest.TestCase ): __A : int = True @classmethod def __UpperCamelCase ( cls : str ) -> str: lowercase__ : str = tempfile.mkdtemp() @classmethod def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCamelCase ( self : str ) -> Optional[int]: if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowercase_ ) class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str: lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase_ ( _lowerCamelCase : int): lowercase__ : Tuple = AcceleratorState() lowercase__ : Optional[int] = tensor[None].clone().to(state.device) lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu() lowercase__ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0]): if not torch.equal(tensors[i] , _lowerCamelCase): return False return True class snake_case_ : def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]: lowercase__ : int = returncode lowercase__ : Dict = stdout lowercase__ : List[Any] = stderr async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str): while True: lowercase__ : int = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : Tuple = [] lowercase__ : List[Any] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))), asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True): lowercase__ : Optional[Any] = asyncio.get_event_loop() lowercase__ : List[Any] = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : str = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Dict = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') return result class snake_case_ ( __A ): pass def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False): try: lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT) if return_stdout: if hasattr(_lowerCamelCase , "decode"): lowercase__ : Optional[Any] = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
333
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase_ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = "tapas" def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_snake_case ,**_snake_case ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Dict = type_vocab_sizes lowercase__ : Optional[Any] = initializer_range lowercase__ : Dict = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Any = positive_label_weight lowercase__ : int = num_aggregation_labels lowercase__ : List[str] = aggregation_loss_weight lowercase__ : Optional[int] = use_answer_as_supervision lowercase__ : Optional[Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : str = temperature lowercase__ : int = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : Union[str, Any] = average_approximation_function lowercase__ : Union[str, Any] = cell_selection_preference lowercase__ : Any = answer_loss_cutoff lowercase__ : List[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : int = average_logits_per_cell lowercase__ : str = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Any = init_cell_selection_weights_to_zero lowercase__ : 
Optional[int] = reset_position_index_per_cell lowercase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Optional[Any] = aggregation_labels lowercase__ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels ,_snake_case ): lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
16
1
"""simple docstring""" def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' if upper_limit < 0: raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" ) UpperCAmelCase = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 UpperCAmelCase = 1 if upper_limit > 0: UpperCAmelCase = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowerCAmelCase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''') print('''\n*** Enter -1 at any time to quit ***''') print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''') try: while True: lowerCAmelCase_ : Any = int(input().strip()) if N < 0: print('''\n********* Goodbye!! ************''') break else: print(F'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print('''Try another upper limit for the sequence: ''', end='''''') except (NameError, ValueError): print('''\n********* Invalid input, goodbye! ************\n''') import doctest doctest.testmod()
248
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowerCAmelCase_ : Any = ['''gpt2'''] lowerCAmelCase_ : Optional[int] = '''gpt2''' if is_tf_available(): class UpperCamelCase_ ( tf.Module ): def __init__( self , snake_case__ ) -> List[str]: """simple docstring""" super().__init__() UpperCAmelCase = tokenizer UpperCAmelCase = AutoConfig.from_pretrained(snake_case__ ) UpperCAmelCase = TFGPTaLMHeadModel.from_config(snake_case__ ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) ) def UpperCamelCase_ ( self , snake_case__ ) -> List[str]: """simple docstring""" UpperCAmelCase = self.tokenizer(snake_case__ ) UpperCAmelCase = tokenized["""input_ids"""].to_tensor() UpperCAmelCase = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase = self.model(input_ids=snake_case__ , attention_mask=snake_case__ )["""logits"""] return outputs @require_tf @require_keras_nlp class UpperCamelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() UpperCAmelCase = [GPTaTokenizer.from_pretrained(snake_case__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase = [TFGPTaTokenizer.from_pretrained(snake_case__ ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def UpperCamelCase_ ( self ) -> Optional[int]: """simple docstring""" for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase = tokenizer([test_inputs] , return_tensors="""tf""" ) UpperCAmelCase = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase = python_outputs[key].numpy() UpperCAmelCase = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case__ , tf.intaa ) == tf_outputs_values ) ) @slow def UpperCamelCase_ ( self ) -> Optional[Any]: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase = tf.function(snake_case__ ) for test_inputs in self.test_sentences: UpperCAmelCase = tf.constant(snake_case__ ) UpperCAmelCase = compiled_tokenizer(snake_case__ ) UpperCAmelCase = tf_tokenizer(snake_case__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCamelCase_ ( self ) -> Any: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase = ModelToSave(tokenizer=snake_case__ ) UpperCAmelCase 
= tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase = model.serving(snake_case__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase = Path(snake_case__ ) / """saved.model""" tf.saved_model.save(snake_case__ , snake_case__ , signatures={"""serving_default""": model.serving} ) UpperCAmelCase = tf.saved_model.load(snake_case__ ) UpperCAmelCase = loaded_model.signatures["""serving_default"""](snake_case__ )["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def UpperCamelCase_ ( self ) -> Any: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase = tf_tokenizer(snake_case__ ) # Build model with some sample inputs UpperCAmelCase = tf_tokenizer.get_config() UpperCAmelCase = TFGPTaTokenizer.from_config(snake_case__ ) UpperCAmelCase = model_from_config(snake_case__ ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def UpperCamelCase_ ( self ) -> int: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase = 12_31_23 for max_length in [3, 5, 10_24]: UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase = tf_tokenizer(snake_case__ , max_length=snake_case__ ) UpperCAmelCase = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
248
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Any: __lowerCamelCase : Optional[int] = tempfile.mkdtemp() # fmt: off __lowerCamelCase : Dict = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on __lowerCamelCase : Dict = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) __lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] __lowerCamelCase : Tuple = {'unk_token': '<unk>'} __lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : str = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], 'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> List[str]: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> Any: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> int: shutil.rmtree(self.tmpdirname ) def lowercase_ ( self ) -> List[str]: __lowerCamelCase : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : str = self.get_tokenizer() __lowerCamelCase : str = self.get_rust_tokenizer() __lowerCamelCase : Any = self.get_image_processor() __lowerCamelCase : Optional[Any] = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase : str = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __lowerCamelCase : List[str] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 ) __lowerCamelCase : Optional[int] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : Dict = self.get_image_processor() __lowerCamelCase : Optional[int] = self.get_tokenizer() __lowerCamelCase : Optional[int] = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = self.prepare_image_inputs() __lowerCamelCase : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ) __lowerCamelCase : int = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Optional[Any] = self.get_image_processor() __lowerCamelCase : Any = self.get_tokenizer() __lowerCamelCase : Tuple = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = 'lower newer' __lowerCamelCase : Tuple = processor(text=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Tuple = self.get_image_processor() __lowerCamelCase : Optional[Any] = self.get_tokenizer() __lowerCamelCase : List[str] = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = 'lower newer' __lowerCamelCase : List[Any] = self.prepare_image_inputs() __lowerCamelCase : Tuple = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def lowercase_ ( self ) -> 
Dict: __lowerCamelCase : List[Any] = self.get_image_processor() __lowerCamelCase : Optional[int] = self.get_tokenizer() __lowerCamelCase : Optional[int] = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.prepare_image_inputs() __lowerCamelCase : Any = self.prepare_image_inputs() __lowerCamelCase : Tuple = processor(images=SCREAMING_SNAKE_CASE_ , visual_prompt=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def lowercase_ ( self ) -> Any: __lowerCamelCase : int = self.get_image_processor() __lowerCamelCase : List[Any] = self.get_tokenizer() __lowerCamelCase : List[str] = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase : List[str] = processor.batch_decode(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
185
'''simple docstring'''

from numpy import exp, pi, sqrt


def UpperCAmelCase__ ( x , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
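# Quick numerical check: with the defaults mu=0.0 and sigma=1.0 this is the standard
# normal density, so UpperCAmelCase__(0.0) should return 1 / sqrt(2 * pi) ≈ 0.39894.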
185
1
"""simple docstring""" import math import random from typing import Any from .hill_climbing import SearchProblem def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = math.inf , _SCREAMING_SNAKE_CASE = -math.inf , _SCREAMING_SNAKE_CASE = math.inf , _SCREAMING_SNAKE_CASE = -math.inf , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.01 , _SCREAMING_SNAKE_CASE = 1 , ) ->Any: a__: Tuple = False a__: Tuple = search_prob a__: int = start_temperate a__: Dict = [] a__: List[str] = 0 a__: List[str] = None while not search_end: a__: Optional[int] = current_state.score() if best_state is None or current_score > best_state.score(): a__: List[Any] = current_state scores.append(_SCREAMING_SNAKE_CASE ) iterations += 1 a__: int = None a__: str = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to a__: Optional[int] = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ) # picking a random neighbor a__: Tuple = neighbors.pop(_SCREAMING_SNAKE_CASE ) a__: str = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: a__: List[Any] = change * -1 # in case we are finding minimum if change > 0: # improves the solution a__: str = picked_neighbor else: a__: int = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability a__: Tuple = picked_neighbor a__: str = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor a__: List[Any] = True else: a__: Any = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) plt.xlabel('Iterations' ) plt.ylabel('Function values' ) plt.show() return best_state if __name__ == "__main__": def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]: return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) lowercase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) lowercase__ = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' f"and 50 > y > - 5 found via hill climbing: {local_min.score()}" ) # starting the problem with initial coordinates (12, 47) lowercase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) lowercase__ = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' f"and 50 > y > - 5 found via hill climbing: {local_min.score()}" ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]: return (3 * x**2) - (6 * y) lowercase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase__ = simulated_annealing(prob, find_max=False, visualization=True) print( 'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' f"{local_min.score()}" ) lowercase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase__ = simulated_annealing(prob, find_max=True, visualization=True) print( 'The maximum score 
for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' f"{local_min.score()}" )
203
"""simple docstring""" import unittest from knapsack import knapsack as k class __snake_case ( unittest.TestCase ): def lowerCamelCase_ ( self) -> Dict: '''simple docstring''' a__: List[Any] = 0 a__: Dict = [0] a__: int = [0] a__: Optional[Any] = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 0) a__: str = [60] a__: Dict = [10] a__: List[str] = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 0) def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: int = 3 a__: str = [1, 2, 3] a__: Dict = [3, 2, 1] a__: Optional[int] = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 5) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Any = 50 a__: Optional[int] = [60, 1_00, 1_20] a__: str = [10, 20, 30] a__: int = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 2_20) if __name__ == "__main__": unittest.main()
203
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


UpperCamelCase = {
    '''configuration_blenderbot''': [
        '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BlenderbotConfig''',
        '''BlenderbotOnnxConfig''',
    ],
    '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''BlenderbotTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BlenderbotForCausalLM''',
        '''BlenderbotForConditionalGeneration''',
        '''BlenderbotModel''',
        '''BlenderbotPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TFBlenderbotForConditionalGeneration''',
        '''TFBlenderbotModel''',
        '''TFBlenderbotPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''FlaxBlenderbotForConditionalGeneration''',
        '''FlaxBlenderbotModel''',
        '''FlaxBlenderbotPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
319
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} UpperCamelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } UpperCamelCase = { '''allenai/longformer-base-4096''': 4096, '''allenai/longformer-large-4096''': 4096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def SCREAMING_SNAKE_CASE( ) -> Dict: A: Dict = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) A: Union[str, Any] = bs[:] A: List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 A: List[Any] = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Optional[Any] = set() A: Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A: List[Any] = char return pairs class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : int = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : 
Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]: '''simple docstring''' A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: str = json.load(SCREAMING_SNAKE_CASE_ ) A: str = {v: k for k, v in self.encoder.items()} A: Union[str, Any] = errors # how to handle errors in decoding A: Optional[int] = bytes_to_unicode() A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: A: int = merges_handle.read().split('''\n''' )[1:-1] A: str = [tuple(merge.split() ) for merge in bpe_merges] A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = {} A: Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] A: str = tuple(SCREAMING_SNAKE_CASE_ ) A: str = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break A , A: Optional[Any] = bigram A: Tuple = [] A: List[Any] = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: A: 
Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A: int = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ ) A: Any = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ ) A: str = word return word def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: Dict = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): A: Tuple = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str: '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ ) A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) A: int = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) A: Any = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) A: Union[str, Any] = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: int = [self.cls_token_id] A: str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , 
SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: Dict = [self.sep_token_id] A: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): A: List[Any] = ''' ''' + text return (text, kwargs)
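For reference, the greedy merge loop at the heart of the `bpe` method above can be written standalone. This is a hedged sketch: `merges` stands in for the rank table built from the merges file (lower rank merges first), not the exact class internals.

# Minimal byte-pair merge loop; `merges` maps symbol pairs to merge priority (illustrative, assumed).
def bpe_merge(token, merges):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: merges.get(p, float("inf")))  # lowest rank merges first
        if bigram not in merges:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)  # fuse the pair into one symbol
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)


print(bpe_merge("hello", {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}))  # -> "hell o"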
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def snake_case_ (self ): _UpperCAmelCase : Any = tempfile.mkdtemp() _UpperCAmelCase : str = BlipImageProcessor() _UpperCAmelCase : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) _UpperCAmelCase : Any = BlipProcessor(lowerCAmelCase__ , lowerCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def snake_case_ (self , **lowerCAmelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).tokenizer def snake_case_ (self , **lowerCAmelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor def snake_case_ (self ): shutil.rmtree(self.tmpdirname ) def snake_case_ (self ): _UpperCAmelCase : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase : List[str] = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ (self ): _UpperCAmelCase : List[str] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 ) _UpperCAmelCase : List[str] = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase__ ) def snake_case_ (self ): _UpperCAmelCase : Dict = self.get_image_processor() _UpperCAmelCase : Union[str, Any] = self.get_tokenizer() _UpperCAmelCase : str = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) _UpperCAmelCase : int = self.prepare_image_inputs() _UpperCAmelCase : Optional[Any] = image_processor(lowerCAmelCase__ , return_tensors="""np""" ) _UpperCAmelCase : Optional[Any] = processor(images=lowerCAmelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def snake_case_ (self ): _UpperCAmelCase : Optional[int] = self.get_image_processor() _UpperCAmelCase : Union[str, Any] = self.get_tokenizer() _UpperCAmelCase : List[str] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = "lower newer" _UpperCAmelCase : Optional[Any] = processor(text=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ (self ): _UpperCAmelCase : str = self.get_image_processor() _UpperCAmelCase : Optional[Any] = self.get_tokenizer() 
_UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) _UpperCAmelCase : str = "lower newer" _UpperCAmelCase : List[str] = self.prepare_image_inputs() _UpperCAmelCase : Optional[int] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase__ ): processor() def snake_case_ (self ): _UpperCAmelCase : List[str] = self.get_image_processor() _UpperCAmelCase : int = self.get_tokenizer() _UpperCAmelCase : Tuple = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase : str = processor.batch_decode(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case_ (self ): _UpperCAmelCase : Dict = self.get_image_processor() _UpperCAmelCase : Tuple = self.get_tokenizer() _UpperCAmelCase : List[str] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = "lower newer" _UpperCAmelCase : Optional[Any] = self.prepare_image_inputs() _UpperCAmelCase : Optional[Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
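The behaviour these tests pin down is a simple delegation pattern: the processor routes text to its tokenizer and images to its image processor, then merges the two feature dicts. A minimal sketch with illustrative names (not the real Blip API):

# Hedged sketch of processor delegation; class and argument names are assumptions.
class SimpleProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))  # input_ids, attention_mask, ...
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))  # pixel_values
        return encoding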
'''simple docstring'''


def optimal_merge_pattern(files) -> int:
    """Greedy optimal merge pattern: repeatedly merge the two cheapest files.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged.
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
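The list scan above is O(n^2) because of the repeated `min`/`index` passes; the same Huffman-style merge cost comes out of a heap in O(n log n). A sketch under the same input assumptions:

import heapq


def optimal_merge_cost_heap(files):
    # Same greedy as above: always merge the two cheapest files and pay their sum.
    heapq.heapify(files)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total


print(optimal_merge_cost_heap([2, 3, 4]))  # 14, matching the doctest above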
from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = None __lowerCamelCase = None def lowerCAmelCase__ ( ) -> Node | None: '''simple docstring''' A__ = Node(1 ) A__ = Node(2 ) A__ = Node(3 ) A__ = Node(4 ) A__ = Node(5 ) return tree def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> list[int]: '''simple docstring''' return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> list[int]: '''simple docstring''' return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> list[int]: '''simple docstring''' return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> int: '''simple docstring''' return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> Sequence[Node | None]: '''simple docstring''' A__ = [] if root is None: return output A__ = deque([root] ) while process_queue: A__ = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> Sequence[Node | None]: '''simple docstring''' A__ = [] def populate_output(SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return output def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> Sequence[Node | None]: '''simple docstring''' A__ = [] def populate_output(SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return output def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> Sequence[Node | None] | list[Any]: '''simple docstring''' if root is None: return [] A__ = [] A__ = 0 A__ = height(SCREAMING_SNAKE_CASE_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) A__ = 1 else: output.append(get_nodes_from_right_to_left(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) A__ = 0 return output def lowerCAmelCase__ ( ) -> None: # Main function for testing. 
    '''simple docstring'''
    A__ = make_tree()
    print(f"In-order Traversal: {inorder(A__)}")
    print(f"Pre-order Traversal: {preorder(A__)}")
    print(f"Post-order Traversal: {postorder(A__)}", "\n")
    print(f"Height of Tree: {height(A__)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(A__), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(A__) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(A__, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(A__))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
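As a cross-check, the zigzag traversal can also be done level by level with a queue rather than by repeated depth-limited walks; a minimal sketch using plain dicts for nodes (illustrative, not the dataclass above):

from collections import deque


def zigzag_levels(root):
    # Reverse every other level of a plain breadth-first sweep.
    if root is None:
        return []
    output, queue, left_to_right = [], deque([root]), True
    while queue:
        level = [node["data"] for node in queue]
        output.append(level if left_to_right else level[::-1])
        left_to_right = not left_to_right
        for _ in range(len(level)):
            node = queue.popleft()
            for child in ("left", "right"):
                if node.get(child):
                    queue.append(node[child])
    return output


tree = {"data": 1, "left": {"data": 2, "left": {"data": 4}, "right": {"data": 5}}, "right": {"data": 3}}
print(zigzag_levels(tree))  # [[1], [3, 2], [4, 5]]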
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase : Any = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_pad __UpperCamelCase = pad_size def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase ) __UpperCamelCase = (old_height // size + 1) * size - old_height __UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_pad if do_pad is not None else self.do_pad __UpperCamelCase = pad_size if pad_size is not None else self.pad_size __UpperCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_pad: __UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] __UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": lowerCamelCase_ : Optional[int] = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") lowerCamelCase_ : Optional[int] = parser.parse_args() if args.model_type == "roberta": lowerCamelCase_ : int = RobertaForMaskedLM.from_pretrained(args.model_name) lowerCamelCase_ : Optional[Any] = """roberta""" elif args.model_type == "gpt2": lowerCamelCase_ : Dict = GPTaLMHeadModel.from_pretrained(args.model_name) lowerCamelCase_ : List[str] = """transformer""" lowerCamelCase_ : Optional[Any] = model.state_dict() lowerCamelCase_ : Union[str, Any] = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: lowerCamelCase_ : Dict = state_dict[F'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: lowerCamelCase_ : Optional[Any] = F'''{prefix}.embeddings.{w}.weight''' lowerCamelCase_ : Any = state_dict[param_name] for w in ["weight", "bias"]: lowerCamelCase_ : str = F'''{prefix}.embeddings.LayerNorm.{w}''' lowerCamelCase_ : Optional[Any] = state_dict[param_name] # Transformer Blocks # lowerCamelCase_ : List[str] = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: lowerCamelCase_ : Dict = state_dict[ F'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] lowerCamelCase_ : List[str] = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: lowerCamelCase_ : Optional[int] = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: lowerCamelCase_ : Optional[Any] = state_dict[F'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase_ : List[Any] = state_dict[F'''lm_head.dense.{w}'''] lowerCamelCase_ : Union[str, Any] = state_dict[F'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: lowerCamelCase_ : Optional[int] = state_dict[F'''{prefix}.ln_f.{w}'''] lowerCamelCase_ : List[str] = state_dict["""lm_head.weight"""] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
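The per-layer copying above boils down to: pick a subset of teacher blocks and renumber them consecutively in the student. A hedged generic sketch (the key patterns are illustrative, not the exact RoBERTa/GPT-2 names):

def extract_layers(state_dict, teacher_layers=(0, 2, 4, 7, 9, 11), prefix="encoder.layer"):
    # Copy every selected teacher block into the student, renumbering 0, 1, 2, ...
    compressed = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        old = f"{prefix}.{teacher_idx}."
        new = f"{prefix}.{std_idx}."
        for name, tensor in state_dict.items():
            if name.startswith(old):
                compressed[name.replace(old, new, 1)] = tensor
    return compressed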
from ..utils import DummyObject, requires_backends class a__ ( metaclass=__snake_case ): A__ : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Dict: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Dict = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Dict = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> str: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(cls , ['torch', 'transformers', 'onnx'] )
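Every placeholder class above funnels into the same guard; a hedged sketch of what such a `requires_backends` check amounts to (the real helper probes importlib and emits install hints rather than using a hardcoded set):

def requires_backends_sketch(obj, backends, available=frozenset({"torch"})):
    # `available` is hardcoded here purely for illustration.
    missing = [b for b in backends if b not in available]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}.")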
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'biogpt' def __init__( self : Optional[int] ,snake_case : Optional[int]=42384 ,snake_case : List[Any]=1024 ,snake_case : Optional[int]=24 ,snake_case : Optional[int]=16 ,snake_case : Tuple=4096 ,snake_case : Any="gelu" ,snake_case : Tuple=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Tuple=1024 ,snake_case : Any=0.02 ,snake_case : Optional[Any]=1e-12 ,snake_case : List[str]=True ,snake_case : Optional[int]=True ,snake_case : Optional[Any]=0.0 ,snake_case : Optional[Any]=0.0 ,snake_case : Union[str, Any]=1 ,snake_case : List[str]=0 ,snake_case : Union[str, Any]=2 ,**snake_case : Any ,): SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =scale_embedding SCREAMING_SNAKE_CASE =use_cache SCREAMING_SNAKE_CASE =layerdrop SCREAMING_SNAKE_CASE =activation_dropout super().__init__(pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,**snake_case )
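Usage follows the standard config pattern: every constructor keyword becomes an attribute, so overriding a field is just a kwarg. A hedged example, assuming the upstream class name `BioGptConfig`:

config = BioGptConfig(num_hidden_layers=6, hidden_size=512)  # hypothetical instantiation
print(config.num_hidden_layers, config.scale_embedding)  # 6 True  (scale_embedding keeps its default)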
'''simple docstring'''

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
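The `_LazyModule` machinery defers every import until an attribute is first touched; in miniature, the same effect is available through a module-level `__getattr__` (PEP 562):

import importlib

_IMPORTS = {"MMBTConfig": ".configuration_mmbt"}  # attribute -> submodule (illustrative mapping)


def __getattr__(name):
    if name in _IMPORTS:
        module = importlib.import_module(_IMPORTS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")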
'''simple docstring''' UpperCamelCase = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
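All the `is_*_available()` probes gating this file boil down to a cached importlib lookup; a hedged sketch (the real helpers also check minimum versions):

import importlib.util
from functools import lru_cache


@lru_cache(maxsize=None)
def backend_available(name):
    return importlib.util.find_spec(name) is not None


print(backend_available("torch"), backend_available("not_a_real_backend"))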
'''simple docstring'''

import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among ``env_keys``, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    # As its name indicates `strtobool` actually returns an int...
    return strtobool(value) == 1


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
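Example usage of the helpers above:

import os

os.environ["MY_FLAG"] = "true"
print(parse_flag_from_env("MY_FLAG"))  # True ("true" parses to 1)
print(get_int_from_env(["UNSET_A", "UNSET_B"], 7))  # 7 (falls back to the default)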
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path) -> bool:
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"

    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = process __SCREAMING_SNAKE_CASE = params def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.dataset[i] __SCREAMING_SNAKE_CASE = self.process(_A , **self.params ) return processed class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A , _A=None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = loader __SCREAMING_SNAKE_CASE = infer __SCREAMING_SNAKE_CASE = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = loader_batch_size # Internal bookkeeping __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __len__( self ): '''simple docstring''' return len(self.loader ) def __iter__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = iter(self.loader ) return self def _A ( self ): '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice __SCREAMING_SNAKE_CASE = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __SCREAMING_SNAKE_CASE = {} for k, element in self._loader_batch_data.items(): if isinstance(_A , _A ): # Convert ModelOutput to tuple first __SCREAMING_SNAKE_CASE = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): __SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __SCREAMING_SNAKE_CASE = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __SCREAMING_SNAKE_CASE = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __SCREAMING_SNAKE_CASE = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
__SCREAMING_SNAKE_CASE = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __SCREAMING_SNAKE_CASE = self._loader_batch_data.__class__(_A ) self._loader_batch_index += 1 return result def _A ( self ): '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __SCREAMING_SNAKE_CASE = next(self.iterator ) __SCREAMING_SNAKE_CASE = self.infer(_A , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_A , torch.Tensor ): __SCREAMING_SNAKE_CASE = processed else: __SCREAMING_SNAKE_CASE = list(processed.keys() )[0] __SCREAMING_SNAKE_CASE = processed[key] if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE = len(_A ) else: __SCREAMING_SNAKE_CASE = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __SCREAMING_SNAKE_CASE = observed_batch_size # Setting internal index to unwrap the batch __SCREAMING_SNAKE_CASE = processed __SCREAMING_SNAKE_CASE = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A , _A=None ): '''simple docstring''' super().__init__(_A , _A , _A ) def __iter__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = iter(self.loader ) __SCREAMING_SNAKE_CASE = None return self def _A ( self ): '''simple docstring''' if self.subiterator is None: __SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __SCREAMING_SNAKE_CASE = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators __SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params ) __SCREAMING_SNAKE_CASE = next(self.subiterator ) return processed class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __iter__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = iter(self.loader ) return self def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __SCREAMING_SNAKE_CASE = self.loader_batch_item() __SCREAMING_SNAKE_CASE = item.pop('is_last' ) accumulator.append(_A ) if is_last: return accumulator while not is_last: __SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_A , torch.Tensor ): __SCREAMING_SNAKE_CASE = processed else: __SCREAMING_SNAKE_CASE = list(processed.keys() )[0] __SCREAMING_SNAKE_CASE = processed[key] if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE = len(_A ) else: __SCREAMING_SNAKE_CASE = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
__SCREAMING_SNAKE_CASE = observed_batch_size __SCREAMING_SNAKE_CASE = processed __SCREAMING_SNAKE_CASE = 0 while self._loader_batch_index < self.loader_batch_size: __SCREAMING_SNAKE_CASE = self.loader_batch_item() __SCREAMING_SNAKE_CASE = item.pop('is_last' ) accumulator.append(_A ) if is_last: return accumulator else: __SCREAMING_SNAKE_CASE = processed __SCREAMING_SNAKE_CASE = item.pop('is_last' ) accumulator.append(_A ) return accumulator class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = key def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self , _A ): '''simple docstring''' return self.dataset[i][self.key] class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = keya __SCREAMING_SNAKE_CASE = keya def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self , _A ): '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
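The loader-batch bookkeeping above exists so downstream code always sees batch_size=1 items no matter what the DataLoader batch size is; the core slicing move, in miniature:

import torch


def unroll_batch(batch):
    # Slice each tensor in a batched dict back into per-item dicts with a leading dim of 1.
    batch_size = next(iter(batch.values())).shape[0]
    for i in range(batch_size):
        yield {k: v[i].unsqueeze(0) for k, v in batch.items()}


batch = {"input_ids": torch.zeros(4, 7, dtype=torch.long)}
print(sum(1 for _ in unroll_batch(batch)))  # 4 items, each of shape (1, 7)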
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __magic_name__ (lowerCamelCase__ ): lowerCamelCase__ = '''gpt_neo''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , _a=50257 , _a=2048 , _a=2048 , _a=24 , _a=[[["global", "local"], 12]] , _a=16 , _a=None , _a=256 , _a="gelu_new" , _a=0.0 , _a=0.0 , _a=0.0 , _a=0.1 , _a=1E-5 , _a=0.0_2 , _a=True , _a=50256 , _a=50256 , **_a , ) -> Any: lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_layers lowerCAmelCase_ = num_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = window_size lowerCAmelCase_ = activation_function lowerCAmelCase_ = resid_dropout lowerCAmelCase_ = embed_dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = classifier_dropout lowerCAmelCase_ = layer_norm_epsilon lowerCAmelCase_ = initializer_range lowerCAmelCase_ = use_cache lowerCAmelCase_ = bos_token_id lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = attention_types lowerCAmelCase_ = self.expand_attention_types_params(_a ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=_a , eos_token_id=_a , **_a ) @staticmethod def __a ( _a ) -> Optional[Any]: lowerCAmelCase_ = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def A(__a: List[str] , __a: Tuple , __a: List[str] , __a: Any ): import torch lowerCAmelCase_ = input.size() lowerCAmelCase_ = len(_lowerCAmelCase ) lowerCAmelCase_ = shape[dimension] lowerCAmelCase_ = torch.arange(0 , _lowerCAmelCase , _lowerCAmelCase ) lowerCAmelCase_ = torch.div(sizedim - size , _lowerCAmelCase , rounding_mode="floor" ) + 1 lowerCAmelCase_ = torch.arange(_lowerCAmelCase ) + low_indices[:min_length][:, None] lowerCAmelCase_ = [slice(_lowerCAmelCase )] * rank lowerCAmelCase_ = indices lowerCAmelCase_ = input[s] lowerCAmelCase_ = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_lowerCAmelCase ) def A(__a: List[str] , __a: int ): import torch lowerCAmelCase_ = torch.arange(1 , _lowerCAmelCase ) lowerCAmelCase_ = torch.remainder(_lowerCAmelCase , _lowerCAmelCase ) lowerCAmelCase_ = remainders == 0 lowerCAmelCase_ = candidates[divisor_indices] lowerCAmelCase_ = torch.max(_lowerCAmelCase ) return largest_divisor, torch.div(_lowerCAmelCase , _lowerCAmelCase , rounding_mode="floor" ) class __magic_name__ (lowerCamelCase__ ): @property def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(_a , direction="inputs" ) lowerCAmelCase_ = {0: "batch", 1: "past_sequence + sequence"} else: lowerCAmelCase_ = {0: "batch", 1: "sequence"} return common_inputs @property def __a ( self ) -> List[Any]: return self._config.num_heads def __a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Dict: lowerCAmelCase_ = super(_a , self ).generate_dummy_inputs( _a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a ) # We need to order the input in the way they appears in the forward() lowerCAmelCase_ = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase_ = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase_ = seqlen + 2 lowerCAmelCase_ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase_ = [ (torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers ) ] lowerCAmelCase_ = common_inputs["attention_mask"] if self.use_past: lowerCAmelCase_ = ordered_inputs["attention_mask"].dtype lowerCAmelCase_ = torch.cat( [ordered_inputs["attention_mask"], torch.ones(_a , _a , dtype=_a )] , dim=1 ) return ordered_inputs @property def __a ( self ) -> str: return 13
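The sliding-window helper above reimplements `Tensor.unfold` with ONNX-exportable ops; the built-in it mirrors behaves like this:

import torch

x = torch.arange(10).float()
print(x.unfold(dimension=0, size=4, step=2))
# tensor([[0., 1., 2., 3.],
#         [2., 3., 4., 5.],
#         [4., 5., 6., 7.],
#         [6., 7., 8., 9.]])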
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging UpperCamelCase = logging.get_logger(__name__) class snake_case_ ( __A ): __A : Tuple = "linear" __A : Union[str, Any] = "cosine" __A : Any = "cosine_with_restarts" __A : int = "polynomial" __A : Union[str, Any] = "constant" __A : Tuple = "constant_with_warmup" __A : str = "piecewise_constant" def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1): return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase: 1 , last_epoch=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1): def lr_lambda(_lowerCamelCase : int): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1.0 , _lowerCamelCase)) return 1.0 return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1): lowercase__ : Optional[Any] = {} lowercase__ : Any = step_rules.split(",") for rule_str in rule_list[:-1]: lowercase__ , lowercase__ : str = rule_str.split(":") lowercase__ : Optional[Any] = int(_lowerCamelCase) lowercase__ : Optional[Any] = float(_lowerCamelCase) lowercase__ : Union[str, Any] = value lowercase__ : Optional[int] = float(rule_list[-1]) def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple): def rule_func(_lowerCamelCase : int) -> float: lowercase__ : str = sorted(rules_dict.keys()) for i, sorted_step in enumerate(_lowerCamelCase): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowercase__ : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase) return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any=-1): def lr_lambda(_lowerCamelCase : int): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) return max( 0.0 , float(num_training_steps - current_step) / float(max(1 , num_training_steps - num_warmup_steps))) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1): def lr_lambda(_lowerCamelCase : List[str]): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) lowercase__ : str = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps)) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase) * 2.0 * progress))) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1): def lr_lambda(_lowerCamelCase : Optional[Any]): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) lowercase__ : Tuple = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase) * progress) % 1.0)))) return LambdaLR(_lowerCamelCase , _lowerCamelCase , 
_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]=1E-7 , _lowerCamelCase : Optional[Any]=1.0 , _lowerCamelCase : int=-1): lowercase__ : Any = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''') def lr_lambda(_lowerCamelCase : int): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowercase__ : Any = lr_init - lr_end lowercase__ : List[Any] = num_training_steps - num_warmup_steps lowercase__ : Any = 1 - (current_step - num_warmup_steps) / decay_steps lowercase__ : List[str] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) UpperCamelCase = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowercase_ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ): lowercase__ : List[str] = SchedulerType(_lowerCamelCase) lowercase__ : int = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''') if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''') if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , ) return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase)
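All the warmup schedules above share one shape: a linear ramp over `num_warmup_steps`, then some decay toward zero. A standalone check of the linear variant's multiplier:

def linear_lr_multiplier(step, warmup=10, total=100):
    # Mirrors the linear schedule above: ramp to 1.0, then decay linearly to 0.0.
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))


print([round(linear_lr_multiplier(s), 2) for s in (0, 5, 10, 55, 100)])
# [0.0, 0.5, 1.0, 0.5, 0.0]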
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowercase_ : Optional[Any] =IFPipeline lowercase_ : List[str] =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} lowercase_ : List[str] =TEXT_TO_IMAGE_BATCH_PARAMS lowercase_ : int =PipelineTesterMixin.required_optional_params - {'''latents'''} def A__ ( self): return self._get_dummy_components() def A__ ( self ,A__ ,A__=0): if str(A__).startswith('''mps'''): lowercase = torch.manual_seed(A__) else: lowercase = torch.Generator(device=A__).manual_seed(A__) lowercase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def A__ ( self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''') def A__ ( self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1) def A__ ( self): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def A__ ( self): self._test_save_load_local() def A__ ( self): self._test_inference_batch_single_identical( expected_max_diff=1E-2 ,) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def A__ ( self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def A__ ( self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self): # if lowercase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa) lowercase = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa ,text_encoder=A__ ,tokenizer=A__) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''') lowercase , lowercase = pipe_a.encode_prompt('''anime turtle''' ,device='''cuda''') del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowercase = None lowercase = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(A__ ,A__ ,A__ ,A__) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowercase = IFImgaImgPipeline(**pipe_a.components) lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) 
self._test_if_imgaimg(A__ ,A__ ,A__ ,A__) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting lowercase = IFInpaintingPipeline(**pipe_a.components) lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(A__ ,A__ ,A__ ,A__) def A__ ( self ,A__ ,A__ ,A__ ,A__): # pipeline 1 _start_torch_memory_measurement() lowercase = torch.Generator(device='''cpu''').manual_seed(0) lowercase = pipe_a( prompt_embeds=A__ ,negative_prompt_embeds=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,) lowercase = output.images[0] assert image.shape == (6_4, 6_4, 3) lowercase = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''') assert_mean_pixel_difference(A__ ,A__) # pipeline 2 _start_torch_memory_measurement() lowercase = torch.Generator(device='''cpu''').manual_seed(0) lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__) lowercase = pipe_a( prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,) lowercase = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) lowercase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''') assert_mean_pixel_difference(A__ ,A__) def A__ ( self ,A__ ,A__ ,A__ ,A__): # pipeline 1 _start_torch_memory_measurement() lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__) lowercase = torch.Generator(device='''cpu''').manual_seed(0) lowercase = pipe_a( prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,) lowercase = output.images[0] assert image.shape == (6_4, 6_4, 3) lowercase = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''') assert_mean_pixel_difference(A__ ,A__) # pipeline 2 _start_torch_memory_measurement() lowercase = torch.Generator(device='''cpu''').manual_seed(0) lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(0)).to(A__) lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__) lowercase = pipe_a( prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,original_image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,) lowercase = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) lowercase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''') assert_mean_pixel_difference(A__ ,A__) def A__ ( self ,A__ ,A__ ,A__ ,A__): # pipeline 1 _start_torch_memory_measurement() lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__) lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(1)).to(A__) lowercase = torch.Generator(device='''cpu''').manual_seed(0) lowercase = pipe_a( prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,mask_image=A__ ,num_inference_steps=2 
,generator=A__ ,output_type='''np''' ,) lowercase = output.images[0] assert image.shape == (6_4, 6_4, 3) lowercase = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''') assert_mean_pixel_difference(A__ ,A__) # pipeline 2 _start_torch_memory_measurement() lowercase = torch.Generator(device='''cpu''').manual_seed(0) lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__) lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(0)).to(A__) lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(1)).to(A__) lowercase = pipe_a( prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,mask_image=A__ ,original_image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,) lowercase = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) lowercase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''') assert_mean_pixel_difference(A__ ,A__) def UpperCamelCase ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
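The tests above bracket every stage with `_start_torch_memory_measurement`, which resets CUDA's peak-memory counters so `torch.cuda.max_memory_allocated()` reports only that stage. A standalone sketch of the same pattern; `run_stage` is a hypothetical stand-in for any GPU workload:

import torch

def measure_peak_vram(run_stage):
    # clear cached blocks and reset the peak counters before the workload
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    run_stage()
    # peak bytes allocated since the reset
    return torch.cuda.max_memory_allocated()

# usage, mirroring the assertions above (the pipeline call is hypothetical):
# peak = measure_peak_vram(lambda: pipe(prompt, num_inference_steps=2))
# assert peak < 13 * 10**9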
101
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") _snake_case = logging.getLogger(__name__) @dataclass class lowercase : _a = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _a = field( default=UpperCamelCase__,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _a = field( default=UpperCamelCase__,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _a = field( default=UpperCamelCase__,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},) _a = field( default=UpperCamelCase__,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},) _a = field( default="main",metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},) _a = field( default=UpperCamelCase__,metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) },) @dataclass class lowercase : _a = field(default=UpperCamelCase__,metadata={"help": "The input training data file (a text file)."} ) _a = field( default=UpperCamelCase__,metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},) _a = field( default=UpperCamelCase__,metadata={"help": "Overwrite the cached training and evaluation sets"} ) _a = field( default=UpperCamelCase__,metadata={"help": "The number of processes to use for the preprocessing."},) _a = field( default=UpperCamelCase__,metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) },) _a = field( default=UpperCamelCase__,metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) },) _a = field( default=UpperCamelCase__,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) },) _a = field( default=UpperCamelCase__,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) },) def a__ ( self ) -> Dict: if self.train_file is not None: _A : List[str] = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _A : List[str] = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
@dataclass class lowercase : _a = 42 _a = True _a = None _a = None def __call__( self , _a ) -> Optional[Any]: _A : Tuple = """label""" if """label""" in features[0].keys() else """labels""" _A : Any = [feature.pop(_a ) for feature in features] _A : List[str] = len(_a ) _A : Optional[int] = len(features[0]["""input_ids"""] ) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(_a )] for feature in features ] _A : str = list(chain(*_a ) ) _A : Optional[Any] = self.tokenizer.pad( _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) # Un-flatten _A : str = {k: v.view(_a , _a , -1 ) for k, v in batch.items()} # Add back labels _A : Union[str, Any] = torch.tensor(_a , dtype=torch.intaa ) return batch def lowerCAmelCase_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_swag""",snake_case_,snake_case_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",datefmt="""%m/%d/%Y %H:%M:%S""",handlers=[logging.StreamHandler(sys.stdout )],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case_ ) datasets.utils.logging.set_verbosity(snake_case_ ) transformers.utils.logging.set_verbosity(snake_case_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A : List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : Tuple = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Any = data_args.validation_file _A : Dict = data_args.train_file.split(""".""" )[-1] _A : Tuple = load_dataset( snake_case_,data_files=snake_case_,cache_dir=model_args.cache_dir,use_auth_token=True if model_args.use_auth_token else None,) else: # Downloading and loading the swag dataset from the hub. _A : Optional[Any] = load_dataset( """swag""","""regular""",cache_dir=model_args.cache_dir,use_auth_token=True if model_args.use_auth_token else None,) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,) _A : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,cache_dir=model_args.cache_dir,use_fast=model_args.use_fast_tokenizer,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,) _A : List[str] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ),config=snake_case_,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : Dict = [f'''ending{i}''' for i in range(4 )] _A : Tuple = """sent1""" _A : Union[str, Any] = """sent2""" if data_args.max_seq_length is None: _A : Optional[Any] = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value""" """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can""" """ override this default with `--block_size xxx`.""" ) _A : Tuple = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) _A : List[Any] = min(data_args.max_seq_length,tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(snake_case_ ): _A : Optional[int] = [[context] * 4 for context in examples[context_name]] _A : Optional[Any] = examples[question_header_name] _A : Union[str, Any] = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(snake_case_ ) ] # Flatten out _A : Union[str, Any] = list(chain(*snake_case_ ) ) _A : Optional[Any] = list(chain(*snake_case_ ) ) # Tokenize _A : Union[str, Any] = tokenizer( snake_case_,snake_case_,truncation=snake_case_,max_length=snake_case_,padding="""max_length""" if data_args.pad_to_max_length else False,) # Un-flatten return {k: [v[i : i + 4] for i in range(0,len(snake_case_ ),4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) _A : List[Any] = raw_datasets["""train"""] if data_args.max_train_samples is not None: _A : Tuple = min(len(snake_case_ ),data_args.max_train_samples ) _A : Any = train_dataset.select(range(snake_case_ ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): _A : Optional[Any] = train_dataset.map( snake_case_,batched=snake_case_,num_proc=data_args.preprocessing_num_workers,load_from_cache_file=not data_args.overwrite_cache,) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) _A : List[str] = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: _A : int = min(len(snake_case_ ),data_args.max_eval_samples ) _A : List[str] = eval_dataset.select(range(snake_case_ ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): _A : Optional[int] = eval_dataset.map( snake_case_,batched=snake_case_,num_proc=data_args.preprocessing_num_workers,load_from_cache_file=not data_args.overwrite_cache,) # Data collator _A : int = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=snake_case_,pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(snake_case_ ): _A , _A : Dict = eval_predictions _A : str = np.argmax(snake_case_,axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=snake_case_,args=snake_case_,train_dataset=train_dataset if training_args.do_train else None,eval_dataset=eval_dataset if training_args.do_eval else None,tokenizer=snake_case_,data_collator=snake_case_,compute_metrics=snake_case_,) # Training if training_args.do_train: _A : Tuple = None if training_args.resume_from_checkpoint is not None: _A : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : Dict = last_checkpoint _A : Optional[Any] = trainer.train(resume_from_checkpoint=snake_case_ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Dict = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ ) ) _A : Tuple = min(snake_case_,len(snake_case_ ) ) trainer.log_metrics("""train""",snake_case_ ) trainer.save_metrics("""train""",snake_case_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _A : Any = trainer.evaluate() _A : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case_ ) _A : Dict = min(snake_case_,len(snake_case_ ) ) trainer.log_metrics("""eval""",snake_case_ ) 
trainer.save_metrics("""eval""",snake_case_ ) _A : Union[str, Any] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """multiple-choice""", """dataset_tags""": """swag""", """dataset_args""": """regular""", """dataset""": """SWAG""", """language""": """en""", } if training_args.push_to_hub: trainer.push_to_hub(**snake_case_ ) else: trainer.create_model_card(**snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
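The `DataCollatorForMultipleChoice` above relies on a flatten/pad/un-flatten trick: the four answer candidates per example are flattened into one long batch so `tokenizer.pad` can pad them together, then reshaped back to (batch, num_choices, seq_len). A minimal sketch of just the reshape step, with made-up sizes:

import torch

batch_size, num_choices, seq_len = 2, 4, 8

# after padding, the tokenizer returns one flat batch of batch_size * num_choices rows
flat_input_ids = torch.zeros(batch_size * num_choices, seq_len, dtype=torch.long)

# un-flatten: one row per example, one column per answer candidate
input_ids = flat_input_ids.view(batch_size, num_choices, -1)
assert input_ids.shape == (batch_size, num_choices, seq_len)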
343
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiProcessTester(unittest.TestCase):
    def test_debug_launcher_script(self):
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main)
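For context, `debug_launcher` spawns the wrapped function across several CPU processes so distributed code paths can be exercised on machines without GPUs. A hedged sketch; the `num_processes` keyword reflects accelerate's documented signature as best I recall, so treat it as an assumption:

from accelerate import debug_launcher

def training_function():
    # anything that exercises torch.distributed / Accelerator goes here
    print("hello from a launched process")

# run the function in two CPU processes
debug_launcher(training_function, num_processes=2)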
343
1
'''simple docstring''' import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): __snake_case =True from torch.cuda.amp import autocast __snake_case =logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : lowerCamelCase : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase : Optional[str] = field( default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) lowerCamelCase : Optional[bool] = field( default=__lowercase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) lowerCamelCase : Optional[bool] = field( default=__lowercase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) lowerCamelCase : Optional[float] = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) lowerCamelCase : Optional[float] = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) lowerCamelCase : Optional[float] = field( default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def a_ ( lowerCamelCase : ModelArguments , lowerCamelCase : TrainingArguments ): logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) lowerCAmelCase = logging.WARNING if model_args.verbose_logging: lowerCAmelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): lowerCAmelCase = logging.INFO logger.setLevel(lowerCamelCase ) @dataclass class UpperCAmelCase_ : lowerCamelCase : str = field( default=__lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) lowerCamelCase : Optional[str] = field( default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCamelCase : Optional[str] = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) lowerCamelCase : Optional[str] = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) lowerCamelCase : Optional[str] = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) lowerCamelCase : bool = field( default=__lowercase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) lowerCamelCase : Optional[int] = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) lowerCamelCase : Optional[int] = field( default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) lowerCamelCase : Optional[float] = field( default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class UpperCAmelCase_ : lowerCamelCase : WavaVecaForPreTraining lowerCamelCase : WavaVecaFeatureExtractor lowerCamelCase : Union[bool, str] = "longest" lowerCamelCase : Optional[int] = None lowerCamelCase : Optional[int] = None def __call__( self : Union[str, Any] , UpperCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format lowerCAmelCase = self.feature_extractor.pad( UpperCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] ) lowerCAmelCase = batch['input_values'].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to( torch.long ) lowerCAmelCase = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device ) # these two operations makes sure that all values # before the output lengths indices are attended to lowerCAmelCase = 1 lowerCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices lowerCAmelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCAmelCase__ , min_masks=2 , ) return batch class UpperCAmelCase_ ( __lowercase ): def __init__( self : Optional[int] , *UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : List[str]=1.0 , **UpperCAmelCase__ : int ) -> int: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase = 0 lowerCAmelCase = max_gumbel_temp lowerCAmelCase = min_gumbel_temp lowerCAmelCase = gumbel_temp_decay def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : nn.Module , UpperCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: model.train() lowerCAmelCase = self._prepare_inputs(UpperCAmelCase__ ) if self.use_amp: with autocast(): lowerCAmelCase = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ ) else: lowerCAmelCase = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": lowerCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": lowerCAmelCase = loss.sum() / (inputs['mask_time_indices']).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: lowerCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(UpperCAmelCase__ ).backward() elif self.use_apex: with amp.scale_loss(UpperCAmelCase__ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(UpperCAmelCase__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def a_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses() configure_logger(lowerCamelCase , lowerCamelCase ) # Downloading and loading a dataset from the hub. lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" lowerCAmelCase = DatasetDict() lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" lowerCAmelCase = DatasetDict() lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , ) lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowerCamelCase ) def prepare_dataset(lowerCamelCase : Optional[Any] ): # check that all files have the correct sampling rate lowerCAmelCase , lowerCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays lowerCAmelCase = datasets.map( lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names ) # filter audio files that are too long lowerCAmelCase = vectorized_datasets.filter( lambda lowerCamelCase : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(lowerCamelCase : Dict ): return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` lowerCAmelCase = vectorized_datasets.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not 
data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 lowerCAmelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( 'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and' ' ``config.feat_extract_norm=\'layer\'' ) lowerCAmelCase = WavaVecaForPreTraining(lowerCamelCase ) lowerCAmelCase = DataCollatorForWavaVecaPretraining(model=lowerCamelCase , feature_extractor=lowerCamelCase ) lowerCAmelCase = WavaVecaPreTrainer( model=lowerCamelCase , data_collator=lowerCamelCase , args=lowerCamelCase , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
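The custom trainer above anneals the Gumbel-softmax temperature multiplicatively on every update step, clamped at a floor. The schedule in isolation, using this script's argument defaults:

def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    # exponential decay from max_temp toward min_temp, never below the floor
    return max(max_temp * decay**step, min_temp)

assert gumbel_temperature(0) == 2.0
# with these defaults the floor is reached after roughly 277k updates:
# ln(0.5 / 2.0) / ln(0.999995) ~= 277_000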
4
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=True, UpperCAmelCase__="pt" ) -> str: A_ = {"""add_prefix_space""": True} if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and not line.startswith(""" """ ) else {} A_ = padding_side return tokenizer( [line], max_length=UpperCAmelCase__, padding="""max_length""" if pad_to_max_length else None, truncation=UpperCAmelCase__, return_tensors=UpperCAmelCase__, add_special_tokens=UpperCAmelCase__, **UpperCAmelCase__, ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, ) -> List[str]: A_ = input_ids.ne(UpperCAmelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class A__ ( _snake_case ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="train" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="" , ) -> Union[str, Any]: '''simple docstring''' super().__init__() A_ = Path(UpperCamelCase__ ).joinpath(type_path + """.source""" ) A_ = Path(UpperCamelCase__ ).joinpath(type_path + """.target""" ) A_ = self.get_char_lens(self.src_file ) A_ = max_source_length A_ = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' A_ = tokenizer A_ = prefix if n_obs is not None: A_ = self.src_lens[:n_obs] A_ = src_lang A_ = tgt_lang def __len__( self ) -> Dict: '''simple docstring''' return len(self.src_lens ) def __getitem__( self , UpperCamelCase__ ) -> Dict[str, torch.Tensor]: '''simple docstring''' A_ = index + 1 # linecache starts at 1 A_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase__ ).rstrip("""\n""" ) A_ = linecache.getline(str(self.tgt_file ) , UpperCamelCase__ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , UpperCamelCase__ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer ) A_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer A_ = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_source_length , """right""" ) A_ = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_target_length , """right""" ) A_ = source_inputs["""input_ids"""].squeeze() A_ = target_inputs["""input_ids"""].squeeze() A_ = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def snake_case_ ( UpperCamelCase__ ) -> Any: '''simple docstring''' return [len(UpperCamelCase__ ) for x in Path(UpperCamelCase__ ).open().readlines()] def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, torch.Tensor]: '''simple docstring''' A_ = 
torch.stack([x["""input_ids"""] for x in batch] ) A_ = torch.stack([x["""attention_mask"""] for x in batch] ) A_ = torch.stack([x["""decoder_input_ids"""] for x in batch] ) A_ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer.pad_token_id ) A_ = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer.pad_token_id ) A_ = trim_batch(UpperCamelCase__ , UpperCamelCase__ ) A_ , A_ = trim_batch(UpperCamelCase__ , UpperCamelCase__ , attention_mask=UpperCamelCase__ ) A_ = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __lowerCamelCase = getLogger(__name__) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict: return list(itertools.chain.from_iterable(UpperCAmelCase__ ) ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None: A_ = get_git_info() save_json(UpperCAmelCase__, os.path.join(UpperCAmelCase__, """git_log.json""" ) ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=4, **UpperCAmelCase__ ) -> Dict: with open(UpperCAmelCase__, """w""" ) as f: json.dump(UpperCAmelCase__, UpperCAmelCase__, indent=UpperCAmelCase__, **UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str: with open(UpperCAmelCase__ ) as f: return json.load(UpperCAmelCase__ ) def UpperCAmelCase__ ( ) -> Any: A_ = git.Repo(search_parent_directories=UpperCAmelCase__ ) A_ = { """repo_id""": str(UpperCAmelCase__ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List: return list(map(UpperCAmelCase__, UpperCAmelCase__ ) ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]: with open(UpperCAmelCase__, """wb""" ) as f: return pickle.dump(UpperCAmelCase__, UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]: def remove_articles(UpperCAmelCase__ ): return re.sub(r"""\b(a|an|the)\b""", """ """, UpperCAmelCase__ ) def white_space_fix(UpperCAmelCase__ ): return " ".join(text.split() ) def remove_punc(UpperCAmelCase__ ): A_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCAmelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]: A_ = normalize_answer(UpperCAmelCase__ ).split() A_ = normalize_answer(UpperCAmelCase__ ).split() A_ = Counter(UpperCAmelCase__ ) & Counter(UpperCAmelCase__ ) A_ = sum(common.values() ) if num_same == 0: return 0 A_ = 1.0 * num_same / len(UpperCAmelCase__ ) A_ = 1.0 * num_same / len(UpperCAmelCase__ ) A_ = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]: return normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict: assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) A_ = 0 for hypo, pred in zip(UpperCAmelCase__, UpperCAmelCase__ ): em += exact_match_score(UpperCAmelCase__, UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: em /= len(UpperCAmelCase__ ) return {"em": em} def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]: return model_prefix.startswith("""rag""" ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, 
UpperCAmelCase__ ) -> List[str]: A_ = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ = """dropout_rate""" for p in extra_params: if getattr(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ): if not hasattr(UpperCAmelCase__, UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__, equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(UpperCAmelCase__ ) ) delattr(UpperCAmelCase__, UpperCAmelCase__ ) continue A_ = p if hasattr(UpperCAmelCase__, UpperCAmelCase__ ) else equivalent_param[p] setattr(UpperCAmelCase__, UpperCAmelCase__, getattr(UpperCAmelCase__, UpperCAmelCase__ ) ) delattr(UpperCAmelCase__, UpperCAmelCase__ ) return hparams, config
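A quick worked example of the token-overlap F1 defined above; assume both strings have already been through `normalize_answer`, so articles and punctuation are gone:

from collections import Counter

gold = "quick brown fox".split()
pred = "quick fox".split()

common = Counter(gold) & Counter(pred)
num_same = sum(common.values())                       # 2 ("quick", "fox")
precision = num_same / len(pred)                      # 2/2 = 1.0
recall = num_same / len(gold)                         # 2/3
f1 = 2 * precision * recall / (precision + recall)    # 0.8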
162
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __a = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ViTFeatureExtractor"] __a = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
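For readers unfamiliar with the pattern, `_LazyModule` defers the heavy framework imports until an attribute is first accessed. A simplified stand-in built on PEP 562 module-level `__getattr__` (an illustration of the idea, not the actual transformers implementation):

import importlib

_import_structure = {"modeling_vit": ["ViTModel"]}

def __getattr__(name):
    # import the submodule only when one of its symbols is requested
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")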
354
'''simple docstring'''

import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less than or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
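A classical sanity check for the adder's truth table; the circuit measures the sum qubit into cr[0] and the carry qubit into cr[1], so the Qiskit counts string reads carry-then-sum:

def classical_full_adder(a, b, carry_in):
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry out)

assert classical_full_adder(1, 1, 1) == (1, 1)  # expect counts near {"11": shots}
assert classical_full_adder(1, 0, 0) == (1, 0)  # expect counts near {"01": shots}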
43
0
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: A__ : int =False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Union[str, Any]=7 , __snake_case : Any=3 , __snake_case : Dict=18 , __snake_case : List[Any]=30 , __snake_case : int=4_00 , __snake_case : List[Any]=None , __snake_case : List[str]=True , __snake_case : Any=True , __snake_case : Tuple=None , ) -> Union[str, Any]: _lowerCAmelCase = size if size is not None else {"""height""": 20, """width""": 20} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = do_convert_rgb _lowerCAmelCase = [5_12, 10_24, 20_48, 40_96] _lowerCAmelCase = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} def lowercase__ ( self : Any ) -> str: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def lowercase__ ( self : int ) -> Optional[int]: _lowerCAmelCase = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg""" _lowerCAmelCase = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("""RGB""" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class UpperCAmelCase ( snake_case_ , unittest.TestCase ): _lowercase: str = PixaStructImageProcessor if is_vision_available() else None def lowercase__ ( self : Optional[Any] ) -> Optional[int]: _lowerCAmelCase = PixaStructImageProcessingTester(self ) @property def lowercase__ ( self : List[Any] ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self : List[str] ) -> Any: _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__snake_case , """do_normalize""" ) ) self.assertTrue(hasattr(__snake_case , """do_convert_rgb""" ) ) def lowercase__ ( self : str ) -> List[str]: _lowerCAmelCase = self.image_processor_tester.prepare_dummy_image() _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) _lowerCAmelCase = 20_48 _lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" , max_patches=__snake_case ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: # Initialize image_processor _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , Image.Image ) # Test not batched input _lowerCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * 
self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCAmelCase = image_processor( __snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowercase__ ( self : Any ) -> Tuple: # Initialize image_processor _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , Image.Image ) # Test not batched input _lowerCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 _lowerCAmelCase = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__snake_case ): _lowerCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches _lowerCAmelCase = """Hello""" _lowerCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case , header_text=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCAmelCase = image_processor( __snake_case , return_tensors="""pt""" , max_patches=__snake_case , header_text=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: # Initialize image_processor _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , np.ndarray ) _lowerCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCAmelCase = image_processor( __snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: # Initialize image_processor _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , torch.Tensor ) # Test not batched input 
_lowerCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCAmelCase = image_processor( __snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class UpperCAmelCase ( snake_case_ , unittest.TestCase ): _lowercase: Tuple = PixaStructImageProcessor if is_vision_available() else None def lowercase__ ( self : Optional[Any] ) -> int: _lowerCAmelCase = PixaStructImageProcessingTester(self , num_channels=4 ) _lowerCAmelCase = 3 @property def lowercase__ ( self : List[str] ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self : Optional[Any] ) -> List[str]: _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__snake_case , """do_normalize""" ) ) self.assertTrue(hasattr(__snake_case , """do_convert_rgb""" ) ) def lowercase__ ( self : Optional[int] ) -> Tuple: # Initialize image_processor _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , Image.Image ) # Test not batched input _lowerCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCAmelCase = image_processor( __snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
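The `expected_hidden_dim` computed throughout these tests follows Pix2Struct's flattened-patch layout: each patch contributes height x width x channels pixel values, plus two leading columns for the patch's row and column index. With the tester's 16x16 patches and 3 channels:

patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
assert expected_hidden_dim == 770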
70
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class A__ :
    """simple docstring"""

    __magic_name__ = XGLMConfig
    __magic_name__ = {}
    __magic_name__ = 'gelu'

    def __init__( self , __snake_case , __snake_case=1_4 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=9_9 , __snake_case=3_2 , __snake_case=2 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=0.02 , ):
        snake_case = parent
        snake_case = batch_size
        snake_case = seq_length
        snake_case = is_training
        snake_case = use_input_mask
        snake_case = use_labels
        snake_case = vocab_size
        snake_case = d_model
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = ffn_dim
        snake_case = activation_function
        snake_case = activation_dropout
        snake_case = attention_dropout
        snake_case = max_position_embeddings
        snake_case = initializer_range
        snake_case = None
        snake_case = 0
        snake_case = 2
        snake_case = 1

    def a_ ( self ):
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )

    def a_ ( self ):
        snake_case = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        snake_case = None
        if self.use_input_mask:
            snake_case = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case = self.get_config()
        snake_case = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def a_ ( self ):
        return XGLMConfig(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            num_layers=self.num_hidden_layers ,
            attention_heads=self.num_attention_heads ,
            ffn_dim=self.ffn_dim ,
            activation_function=self.activation_function ,
            activation_dropout=self.activation_dropout ,
            attention_dropout=self.attention_dropout ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            use_cache=__snake_case ,
            bos_token_id=self.bos_token_id ,
            eos_token_id=self.eos_token_id ,
            pad_token_id=self.pad_token_id ,
            return_dict=__snake_case ,
        )

    def a_ ( self ):
        snake_case = self.prepare_config_and_inputs()
        (
            ( snake_case ) ,
            ( snake_case ) ,
            ( snake_case ) ,
            ( snake_case ) ,
        ) = config_and_inputs
        snake_case = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict


@require_tf
class A__ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """simple docstring"""

    __magic_name__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    __magic_name__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
    __magic_name__ = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def a_ ( self ):
        snake_case = TFXGLMModelTester(self )
        snake_case = ConfigTester(self , config_class=__snake_case , n_embd=3_7 )

    def a_ ( self ):
        self.config_tester.run_common_tests()

    @slow
    def a_ ( self ):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case = TFXGLMModel.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def a_ ( self ):
        super().test_resize_token_embeddings()


@require_tf
class A__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def a_ ( self , __snake_case=True ):
        snake_case = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        snake_case = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        snake_case = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        snake_case = model.generate(__snake_case , do_sample=__snake_case , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , __snake_case )

    @slow
    def a_ ( self ):
        snake_case = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        snake_case = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )

        tf.random.set_seed(0 )
        snake_case = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        snake_case = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            snake_case = model.generate(__snake_case , do_sample=__snake_case , seed=[7, 0] )
        snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=__snake_case )

        snake_case = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(__snake_case , __snake_case )

    @slow
    def a_ ( self ):
        snake_case = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        snake_case = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )

        snake_case = '''left'''

        # use different length sentences to test batching
        snake_case = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]

        snake_case = tokenizer(__snake_case , return_tensors='''tf''' , padding=__snake_case )
        snake_case = inputs['''input_ids''']

        snake_case = model.generate(input_ids=__snake_case , attention_mask=inputs['''attention_mask'''] , max_new_tokens=1_2 )

        snake_case = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        snake_case = model.generate(input_ids=__snake_case , max_new_tokens=1_2 )

        snake_case = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        snake_case = model.generate(input_ids=__snake_case , max_new_tokens=1_2 )

        snake_case = tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
        snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__snake_case )
        snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=__snake_case )

        snake_case = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(__snake_case , __snake_case )
        self.assertListEqual(__snake_case , [non_padded_sentence, padded_sentence] )
127
0
'''simple docstring'''

import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''

    lowerCAmelCase__ = (DEISMultistepScheduler,)
    lowerCAmelCase__ = (("""num_inference_steps""", 25),)

    def __lowerCamelCase ( self : Tuple , **_lowerCAmelCase : Dict):
        '''simple docstring'''
        __lowercase = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        config.update(**__a)
        return config

    def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : List[str]=0 , **_lowerCAmelCase : List[str]):
        '''simple docstring'''
        __lowercase = dict(self.forward_default_kwargs)
        __lowercase = kwargs.pop('num_inference_steps' , __a)
        __lowercase = self.dummy_sample
        __lowercase = 0.1 * sample
        __lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            __lowercase = self.get_scheduler_config(**__a)
            __lowercase = scheduler_class(**__a)
            scheduler.set_timesteps(__a)
            # copy over dummy past residuals
            __lowercase = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__a)
                __lowercase = scheduler_class.from_pretrained(__a)
                new_scheduler.set_timesteps(__a)
                # copy over dummy past residuals
                __lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]

            __lowercase = sample, sample
            for t in range(__a , time_step + scheduler.config.solver_order + 1):
                __lowercase = scheduler.step(__a , __a , __a , **__a).prev_sample
                __lowercase = new_scheduler.step(__a , __a , __a , **__a).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCamelCase ( self : Union[str, Any]):
        '''simple docstring'''
        pass

    def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : List[str]=0 , **_lowerCAmelCase : List[str]):
        '''simple docstring'''
        __lowercase = dict(self.forward_default_kwargs)
        __lowercase = kwargs.pop('num_inference_steps' , __a)
        __lowercase = self.dummy_sample
        __lowercase = 0.1 * sample
        __lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            __lowercase = self.get_scheduler_config()
            __lowercase = scheduler_class(**__a)
            scheduler.set_timesteps(__a)
            # copy over dummy past residuals (must be after setting timesteps)
            __lowercase = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__a)
                __lowercase = scheduler_class.from_pretrained(__a)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__a)
                # copy over dummy past residual (must be after setting timesteps)
                __lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]

            __lowercase = scheduler.step(__a , __a , __a , **__a).prev_sample
            __lowercase = new_scheduler.step(__a , __a , __a , **__a).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : List[str]):
        '''simple docstring'''
        if scheduler is None:
            __lowercase = self.scheduler_classes[0]
            __lowercase = self.get_scheduler_config(**__a)
            __lowercase = scheduler_class(**__a)

        __lowercase = self.scheduler_classes[0]
        __lowercase = self.get_scheduler_config(**__a)
        __lowercase = scheduler_class(**__a)

        __lowercase = 1_0
        __lowercase = self.dummy_model()
        __lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(__a)

        for i, t in enumerate(scheduler.timesteps):
            __lowercase = model(__a , __a)
            __lowercase = scheduler.step(__a , __a , __a).prev_sample

        return sample

    def __lowerCamelCase ( self : Any):
        '''simple docstring'''
        __lowercase = dict(self.forward_default_kwargs)
        __lowercase = kwargs.pop('num_inference_steps' , __a)

        for scheduler_class in self.scheduler_classes:
            __lowercase = self.get_scheduler_config()
            __lowercase = scheduler_class(**__a)

            __lowercase = self.dummy_sample
            __lowercase = 0.1 * sample

            if num_inference_steps is not None and hasattr(__a , 'set_timesteps'):
                scheduler.set_timesteps(__a)
            elif num_inference_steps is not None and not hasattr(__a , 'set_timesteps'):
                __lowercase = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            __lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            __lowercase = dummy_past_residuals[: scheduler.config.solver_order]

            __lowercase = scheduler.timesteps[5]
            __lowercase = scheduler.timesteps[6]

            __lowercase = scheduler.step(__a , __a , __a , **__a).prev_sample
            __lowercase = scheduler.step(__a , __a , __a , **__a).prev_sample

            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __lowerCamelCase ( self : Optional[int]):
        '''simple docstring'''
        __lowercase = DEISMultistepScheduler(**self.get_scheduler_config())
        __lowercase = self.full_loop(scheduler=__a)
        __lowercase = torch.mean(torch.abs(__a))
        assert abs(result_mean.item() - 0.2_3916) < 1e-3

        __lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        __lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        __lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        __lowercase = DEISMultistepScheduler.from_config(scheduler.config)

        __lowercase = self.full_loop(scheduler=__a)
        __lowercase = torch.mean(torch.abs(__a))
        assert abs(result_mean.item() - 0.2_3916) < 1e-3

    def __lowerCamelCase ( self : Union[str, Any]):
        '''simple docstring'''
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=__a)

    def __lowerCamelCase ( self : Union[str, Any]):
        '''simple docstring'''
        self.check_over_configs(thresholding=__a)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__a ,
                            prediction_type=__a ,
                            sample_max_value=__a ,
                            algorithm_type='deis' ,
                            solver_order=__a ,
                            solver_type=__a ,
                        )

    def __lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a)

    def __lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__a ,
                            solver_type=__a ,
                            prediction_type=__a ,
                            algorithm_type=__a ,
                        )
                        __lowercase = self.full_loop(
                            solver_order=__a ,
                            solver_type=__a ,
                            prediction_type=__a ,
                            algorithm_type=__a ,
                        )
                        assert not torch.isnan(__a).any(), "Samples have nan numbers"

    def __lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=__a)
        self.check_over_configs(lower_order_final=__a)

    def __lowerCamelCase ( self : Optional[int]):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=__a , time_step=0)

    def __lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        __lowercase = self.full_loop()
        __lowercase = torch.mean(torch.abs(__a))
        assert abs(result_mean.item() - 0.2_3916) < 1e-3

    def __lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        __lowercase = self.full_loop(prediction_type='v_prediction')
        __lowercase = torch.mean(torch.abs(__a))
        assert abs(result_mean.item() - 0.091) < 1e-3

    def __lowerCamelCase ( self : Tuple):
        '''simple docstring'''
        __lowercase = self.scheduler_classes[0]
        __lowercase = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0)
        __lowercase = scheduler_class(**__a)

        __lowercase = 1_0
        __lowercase = self.dummy_model()
        __lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__a)

        for i, t in enumerate(scheduler.timesteps):
            __lowercase = model(__a , __a)
            __lowercase = scheduler.step(__a , __a , __a).prev_sample

        assert sample.dtype == torch.float16
350
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowerCamelCase = logging.get_logger(__name__)

lowerCamelCase = {
    """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}


class _UpperCamelCase ( A , A ):
    '''simple docstring'''

    lowerCAmelCase__ = """resnet"""
    lowerCAmelCase__ = ["""basic""", """bottleneck"""]

    def __init__( self : Any , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[int]=6_4 , _lowerCAmelCase : str=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _lowerCAmelCase : Any=[3, 4, 6, 3] , _lowerCAmelCase : List[Any]="bottleneck" , _lowerCAmelCase : List[str]="relu" , _lowerCAmelCase : int=False , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=None , **_lowerCAmelCase : Any , ):
        '''simple docstring'''
        super().__init__(**_lowerCAmelCase)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        __lowercase = num_channels
        __lowercase = embedding_size
        __lowercase = hidden_sizes
        __lowercase = depths
        __lowercase = layer_type
        __lowercase = hidden_act
        __lowercase = downsample_in_first_stage
        __lowercase = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase) + 1)]
        __lowercase , __lowercase = get_aligned_output_features_output_indices(
            out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names)


class _UpperCamelCase ( A ):
    '''simple docstring'''

    lowerCAmelCase__ = version.parse("""1.11""" )

    @property
    def __lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def __lowerCamelCase ( self : Tuple):
        '''simple docstring'''
        return 1e-3
48
0
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml a_ = NewType('DataClass', Any) a_ = NewType('DataClassType', Any) def __UpperCAmelCase ( __UpperCamelCase ): if isinstance(__UpperCamelCase , __UpperCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : int = {str(__UpperCamelCase ): choice for choice in choices} return lambda __UpperCamelCase : str_to_choice.get(__UpperCamelCase , __UpperCamelCase ) def __UpperCAmelCase ( *, __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = dataclasses.MISSING , __UpperCamelCase = dataclasses.MISSING , __UpperCamelCase = None , **__UpperCamelCase , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __lowercase : List[str] = {} if aliases is not None: __lowercase : Dict = aliases if help is not None: __lowercase : Tuple = help return dataclasses.field(metadata=__UpperCamelCase , default=__UpperCamelCase , default_factory=__UpperCamelCase , **__UpperCamelCase ) class UpperCAmelCase_ ( _a ): UpperCamelCase =42 def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ) -> int: # To make the default appear when using --help if "formatter_class" not in kwargs: __lowercase : Optional[int] = ArgumentDefaultsHelpFormatter super().__init__(**lowercase__ ) if dataclasses.is_dataclass(lowercase__ ): __lowercase : Union[str, Any] = [dataclass_types] __lowercase : List[Any] = list(lowercase__ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowercase__ ) @staticmethod def _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: __lowercase : Optional[Any] = F"""--{field.name}""" __lowercase : str = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , lowercase__ ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) __lowercase : Optional[int] = kwargs.pop('''aliases''' , [] ) if isinstance(lowercase__ , lowercase__ ): __lowercase : Optional[Any] = [aliases] __lowercase : Union[str, Any] = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(lowercase__ , '''UnionType''' ) and isinstance(lowercase__ , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(lowercase__ ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' F""" Problem encountered in field \'{field.name}\'.""" ) if type(lowercase__ ) not in field.type.__args__: # filter `str` in Union __lowercase : Optional[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __lowercase : Any = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __lowercase : Dict = ( field.type.__args__[0] if isinstance(lowercase__ , field.type.__args__[1] ) else field.type.__args__[1] ) __lowercase : Union[str, Any] = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __lowercase : Optional[int] = {} if origin_type is Literal or (isinstance(field.type , lowercase__ ) and issubclass(field.type , lowercase__ )): if origin_type is Literal: __lowercase : List[str] = field.type.__args__ else: __lowercase : Optional[Any] = [x.value for x in field.type] __lowercase : Union[str, Any] = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: __lowercase : Optional[int] = field.default else: __lowercase : Optional[Any] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __lowercase : Any = copy(lowercase__ ) # Hack because type=bool in argparse does not behave as we want. __lowercase : Any = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
__lowercase : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __lowercase : Tuple = default # This tells argparse we accept 0 or 1 value after --field_name __lowercase : Dict = '''?''' # This is the value that will get picked if we do --field_name (without value) __lowercase : Dict = True elif isclass(lowercase__ ) and issubclass(lowercase__ , lowercase__ ): __lowercase : int = field.type.__args__[0] __lowercase : Dict = '''+''' if field.default_factory is not dataclasses.MISSING: __lowercase : List[str] = field.default_factory() elif field.default is dataclasses.MISSING: __lowercase : Tuple = True else: __lowercase : Any = field.type if field.default is not dataclasses.MISSING: __lowercase : int = field.default elif field.default_factory is not dataclasses.MISSING: __lowercase : Optional[int] = field.default_factory() else: __lowercase : List[Any] = True parser.add_argument(lowercase__ , *lowercase__ , **lowercase__ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __lowercase : str = False parser.add_argument(F"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **lowercase__ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if hasattr(lowercase__ , '''_argument_group_name''' ): __lowercase : int = self.add_argument_group(dtype._argument_group_name ) else: __lowercase : Dict = self try: __lowercase : List[Any] = get_type_hints(lowercase__ ) except NameError: raise RuntimeError( F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowercase__ ): __lowercase : Any = '''.'''.join(map(lowercase__ , sys.version_info[:3] ) ) raise RuntimeError( F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(lowercase__ ): if not field.init: continue __lowercase : str = type_hints[field.name] self._parse_dataclass_field(lowercase__ , lowercase__ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __lowercase : List[Any] = [] if args_filename: args_files.append(Path(lowercase__ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __lowercase : Optional[Any] = ArgumentParser() args_file_parser.add_argument(lowercase__ , type=lowercase__ , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) __lowercase ,__lowercase : Any = args_file_parser.parse_known_args(args=lowercase__ ) __lowercase : Tuple = vars(lowercase__ ).get(args_file_flag.lstrip('''-''' ) , lowercase__ ) if cmd_args_file_paths: args_files.extend([Path(lowercase__ ) for p in cmd_args_file_paths] ) __lowercase : int = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __lowercase : int = file_args + args if args is not None else file_args + sys.argv[1:] __lowercase ,__lowercase : List[Any] = self.parse_known_args(args=lowercase__ ) __lowercase : Dict = [] for dtype in self.dataclass_types: __lowercase : Union[str, Any] = {f.name for f in dataclasses.fields(lowercase__ ) if f.init} __lowercase : int = {k: v for k, v in vars(lowercase__ ).items() if k in keys} for k in keys: delattr(lowercase__ , lowercase__ ) __lowercase : Dict = dtype(**lowercase__ ) outputs.append(lowercase__ ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(lowercase__ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False ) -> Tuple[DataClass, ...]: __lowercase : str = set(args.keys() ) __lowercase : Tuple = [] for dtype in self.dataclass_types: __lowercase : List[Any] = {f.name for f in dataclasses.fields(lowercase__ ) if f.init} __lowercase : Optional[Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __lowercase : Optional[Any] = dtype(**lowercase__ ) outputs.append(lowercase__ ) if not allow_extra_keys and unused_keys: raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(lowercase__ )}""" ) return tuple(lowercase__ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False ) -> Tuple[DataClass, ...]: with open(Path(lowercase__ ) , encoding='''utf-8''' ) as open_json_file: __lowercase : str = json.loads(open_json_file.read() ) __lowercase : Union[str, Any] = self.parse_dict(lowercase__ , allow_extra_keys=lowercase__ ) return tuple(lowercase__ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False ) -> Tuple[DataClass, ...]: __lowercase : Any = self.parse_dict(yaml.safe_load(Path(lowercase__ ).read_text() ) , allow_extra_keys=lowercase__ ) return tuple(lowercase__ )
249
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: '''simple docstring''' model.train() __UpperCAmelCase = model(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' set_seed(4_2 ) __UpperCAmelCase = RegressionModel() __UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) # Make a copy of `model` if sched: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' # Test when on a single CPU or GPU that the context manager does nothing __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" 
(noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' # Test on distributed setup that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = 
input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' __UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def __a ( ) -> str: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = 
RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase = RegressionDataset(length=9_6 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if iteration < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if batch_num < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __a ( ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
333
0
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ (__SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" __lowerCAmelCase :List[Any] = DebertaTokenizer __lowerCAmelCase :List[Any] = True __lowerCAmelCase :Union[str, Any] = DebertaTokenizerFast def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a__ : int = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] a__ : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) a__ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] a__ : List[Any] = {'''unk_token''': '''[UNK]'''} a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) a__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase__ ) ) def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]: """simple docstring""" a__ : Tuple = '''lower newer''' a__ : Optional[Any] = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Dict = self.get_tokenizer() a__ : Optional[Any] = '''lower newer''' a__ : Tuple = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] a__ : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) a__ : Union[str, Any] = tokens + [tokenizer.unk_token] a__ : List[str] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : int = self.get_tokenizer() a__ : Optional[int] = tokenizer("""Hello""" , """World""" ) a__ : Optional[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["""token_type_ids"""] , UpperCamelCase__ ) @slow def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Optional[int] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) a__ : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__ ) a__ : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__ ) a__ : Dict = tokenizer.encode( """sequence builders""" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ ) a__ : Optional[int] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ ) a__ : List[Any] = 
tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ ) a__ : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : Dict = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: a__ : Any = tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) a__ : Optional[Any] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] a__ : Optional[Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ ) a__ : List[str] = [tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) for seq in encoding['''input_ids''']] # fmt: off a__ : Optional[int] = { '''input_ids''': [ [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on a__ : Any = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , UpperCamelCase__ ) for expected, decoded in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
367
from string import ascii_uppercase

_lowercase : str = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase : Dict = dict(enumerate(ascii_uppercase))


def lowerCAmelCase_ ( _lowercase : str , _lowercase : str) -> str:
    """simple docstring"""
    a__ : Any = len(_lowercase)
    a__ : Optional[int] = 0
    while True:
        if x == i:
            a__ : Optional[Any] = 0
        if len(_lowercase) == len(_lowercase):
            break
        key += key[i]
        i += 1
    return key


def lowerCAmelCase_ ( _lowercase : str , _lowercase : str) -> str:
    """simple docstring"""
    a__ : Tuple = """"""
    a__ : str = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            a__ : List[str] = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dicta[x]
    return cipher_text


def lowerCAmelCase_ ( _lowercase : str , _lowercase : str) -> str:
    """simple docstring"""
    a__ : int = """"""
    a__ : int = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            a__ : Dict = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dicta[x]
    return or_txt


def lowerCAmelCase_ ( ) -> None:
    """simple docstring"""
    a__ : List[Any] = """THE GERMAN ATTACK"""
    a__ : List[Any] = """SECRET"""
    a__ : Tuple = generate_key(_lowercase , _lowercase)
    a__ : str = cipher_text(_lowercase , _lowercase)
    print(F'''Encrypted Text = {s}''')
    print(F'''Original Text = {original_text(_lowercase , _lowercase)}''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
266
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


__snake_case : Dict = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : int = ["""ConvNextFeatureExtractor"""]
    __snake_case : int = ["""ConvNextImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Tuple = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Optional[Any] = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    __snake_case : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
248
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


__snake_case : Dict = logging.get_logger(__name__)


class A__(a_ ):
    """simple docstring"""

    _A : Dict = ['''pixel_values''']

    def __init__( self , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = 8 , **_lowercase , ) -> None:
        super().__init__(**_lowercase )
        a_ : Tuple = do_rescale
        a_ : Dict = rescale_factor
        a_ : int = do_pad
        a_ : Optional[int] = pad_size

    def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase ) -> np.ndarray:
        return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )

    def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = None ) -> int:
        a_ , a_ : str = get_image_size(_lowercase )
        a_ : Tuple = (old_height // size + 1) * size - old_height
        a_ : List[Any] = (old_width // size + 1) * size - old_width

        return pad(_lowercase , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_lowercase )

    def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> List[str]:
        a_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        a_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
        a_ : Tuple = do_pad if do_pad is not None else self.do_pad
        a_ : Tuple = pad_size if pad_size is not None else self.pad_size

        a_ : Tuple = make_list_of_images(_lowercase )

        if not valid_images(_lowercase ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        # All transformations expect numpy arrays.
        a_ : Tuple = [to_numpy_array(_lowercase ) for image in images]

        if do_rescale:
            a_ : Dict = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]

        if do_pad:
            a_ : str = [self.pad(_lowercase , size=_lowercase ) for image in images]

        a_ : Optional[int] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]

        a_ : Optional[Any] = {"""pixel_values""": images}
        return BatchFeature(data=_lowercase , tensor_type=_lowercase )
248
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
267
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
267
1
"""simple docstring""" print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
203
"""simple docstring""" def __lowerCAmelCase ( lowercase : list[int] ) -> float: """simple docstring""" if not nums: # Makes sure that the list is not empty raise ValueError("List is empty" ) snake_case : List[str] = sum(lowercase ) / len(lowercase ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
203
1
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = BertTokenizer UpperCamelCase = BertTokenizerFast UpperCamelCase = True UpperCamelCase = True UpperCamelCase = filter_non_english def a__ ( self : Any ) -> Any: """simple docstring""" super().setUp() lowerCamelCase_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def a__ ( self : List[str] , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = 'unwanted, running' return input_text, output_text def a__ ( self : str ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file ) lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] ) def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" if not self.test_rust_tokenizer: return lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = tokenizer.tokenize(A_ ) lowerCamelCase_ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # With lower casing lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ ) lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ ) lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = tokenizer.tokenize(A_ ) lowerCamelCase_ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : str ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : str ) -> List[str]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer() lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.' 
lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(A_ ) , A_ ) def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCamelCase_ = {} for i, token in enumerate(A_ ): lowerCamelCase_ = i lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def a__ ( self : Tuple ) -> Any: """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def a__ ( self : str ) -> Optional[Any]: """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def a__ ( self : List[str] ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('bert-base-uncased' ) lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def a__ ( self : int ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowerCamelCase_ = tokenizer_r.encode_plus( A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , ) lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False lowerCamelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case 
else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def a__ ( self : Any ) -> Any: """simple docstring""" lowerCamelCase_ = ['的', '人', '有'] lowerCamelCase_ = ''.join(A_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = True lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ ) lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = False lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ ) lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that only the first Chinese character is not preceded by "##". lowerCamelCase_ = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ ) ] self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , A_ )
208
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
208
1
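The tokenizer tests above exercise greedy WordPiece segmentation. Here is a minimal self-contained sketch of that longest-match-first algorithm; the `wordpiece_tokenize` name and the toy vocabulary are illustrative assumptions, not the library's API.

def wordpiece_tokenize(word: str, vocab: set[str], unk: str = "[UNK]") -> list[str]:
    """Greedy longest-match-first WordPiece over a single word.

    Continuation pieces carry a '##' prefix; if no piece fits at some
    position, the whole word maps to the unknown token.
    """
    pieces: list[str] = []
    start = 0
    while start < len(word):
        end = len(word)
        cur_piece = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:  # no vocab entry matches: give up on the word
            return [unk]
        pieces.append(cur_piece)
        start = end
    return pieces


toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece_tokenize("unwanted", toy_vocab))   # ['un', '##want', '##ed']
print(wordpiece_tokenize("unwantedX", toy_vocab))  # ['[UNK]']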
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCAmelCase : Any = Mapping[str, np.ndarray] lowerCAmelCase : int = Mapping[str, Any] # Is a nested dict. lowerCAmelCase : Optional[Any] = 0.01 @dataclasses.dataclass(frozen=UpperCAmelCase_ ) class __lowercase : """simple docstring""" _UpperCAmelCase : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. _UpperCAmelCase : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. _UpperCAmelCase : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. _UpperCAmelCase : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. _UpperCAmelCase : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions _UpperCAmelCase : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files _UpperCAmelCase : Optional[str] = None # Templates used to generate this protein (prediction-only) _UpperCAmelCase : Optional[Sequence[str]] = None # Chain corresponding to each parent _UpperCAmelCase : Optional[Sequence[int]] = None def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = R"(\[[A-Z]+\]\n)" SCREAMING_SNAKE_CASE_: List[str] = [tag.strip() for tag in re.split(_UpperCAmelCase , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0] SCREAMING_SNAKE_CASE_: Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] ) SCREAMING_SNAKE_CASE_: List[str] = ["N", "CA", "C"] SCREAMING_SNAKE_CASE_: Any = None SCREAMING_SNAKE_CASE_: Optional[Any] = None SCREAMING_SNAKE_CASE_: List[str] = None for g in groups: if "[PRIMARY]" == g[0]: SCREAMING_SNAKE_CASE_: Optional[int] = g[1][0].strip() for i in range(len(_UpperCAmelCase ) ): if seq[i] not in residue_constants.restypes: SCREAMING_SNAKE_CASE_: Union[str, Any] = "X" # FIXME: strings are immutable SCREAMING_SNAKE_CASE_: Tuple = np.array( [residue_constants.restype_order.get(_UpperCAmelCase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: SCREAMING_SNAKE_CASE_: List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(_UpperCAmelCase , g[1][axis].split() ) ) ) SCREAMING_SNAKE_CASE_: List[Any] = np.array(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: SCREAMING_SNAKE_CASE_: Optional[int] = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) ) SCREAMING_SNAKE_CASE_: Any = np.zeros( ( len(_UpperCAmelCase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=_UpperCAmelCase , atom_mask=_UpperCAmelCase , aatype=_UpperCAmelCase , residue_index=np.arange(len(_UpperCAmelCase ) ) , b_factors=_UpperCAmelCase , ) def A_ ( _UpperCAmelCase , 
_UpperCAmelCase = 0 ): SCREAMING_SNAKE_CASE_: List[str] = [] SCREAMING_SNAKE_CASE_: Any = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) SCREAMING_SNAKE_CASE_: Any = prot.parents SCREAMING_SNAKE_CASE_: Dict = prot.parents_chain_index if parents is not None and parents_chain_index is not None: SCREAMING_SNAKE_CASE_: Optional[int] = [p for i, p in zip(_UpperCAmelCase , _UpperCAmelCase ) if i == chain_id] if parents is None or len(_UpperCAmelCase ) == 0: SCREAMING_SNAKE_CASE_: Optional[int] = ["N/A"] pdb_headers.append(f"PARENT {' '.join(_UpperCAmelCase )}" ) return pdb_headers def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = [] SCREAMING_SNAKE_CASE_: List[str] = pdb_str.split("\n" ) SCREAMING_SNAKE_CASE_: Optional[int] = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) SCREAMING_SNAKE_CASE_: List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: SCREAMING_SNAKE_CASE_: Optional[int] = [] if prot.parents_chain_index is not None: SCREAMING_SNAKE_CASE_: Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_UpperCAmelCase ) , [] ) parent_dict[str(_UpperCAmelCase )].append(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = max([int(_UpperCAmelCase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): SCREAMING_SNAKE_CASE_: List[str] = parent_dict.get(str(_UpperCAmelCase ) , ["N/A"] ) parents_per_chain.append(_UpperCAmelCase ) else: parents_per_chain.append(list(prot.parents ) ) else: SCREAMING_SNAKE_CASE_: List[Any] = [["N/A"]] def make_parent_line(_UpperCAmelCase ) -> str: return f"PARENT {' '.join(_UpperCAmelCase )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) SCREAMING_SNAKE_CASE_: Union[str, Any] = 0 for i, l in enumerate(_UpperCAmelCase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_UpperCAmelCase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = parents_per_chain[chain_counter] else: SCREAMING_SNAKE_CASE_: Union[str, Any] = ["N/A"] out_pdb_lines.append(make_parent_line(_UpperCAmelCase ) ) return "\n".join(_UpperCAmelCase ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = residue_constants.restypes + ["X"] def res_atoa(_UpperCAmelCase ) -> str: return residue_constants.restype_atoa.get(restypes[r] , "UNK" ) SCREAMING_SNAKE_CASE_: int = residue_constants.atom_types SCREAMING_SNAKE_CASE_: List[str] = [] SCREAMING_SNAKE_CASE_: Optional[int] = prot.atom_mask SCREAMING_SNAKE_CASE_: Optional[Any] = prot.aatype SCREAMING_SNAKE_CASE_: Optional[Any] = prot.atom_positions SCREAMING_SNAKE_CASE_: int = prot.residue_index.astype(np.intaa ) SCREAMING_SNAKE_CASE_: Dict = prot.b_factors SCREAMING_SNAKE_CASE_: str = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("Invalid aatypes." ) SCREAMING_SNAKE_CASE_: Optional[int] = get_pdb_headers(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: pdb_lines.extend(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = aatype.shape[0] SCREAMING_SNAKE_CASE_: str = 1 SCREAMING_SNAKE_CASE_: List[Any] = 0 SCREAMING_SNAKE_CASE_: List[Any] = string.ascii_uppercase SCREAMING_SNAKE_CASE_: int = None # Add all atom sites. 
for i in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_UpperCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue SCREAMING_SNAKE_CASE_: List[Any] = "ATOM" SCREAMING_SNAKE_CASE_: Optional[Any] = atom_name if len(_UpperCAmelCase ) == 4 else f" {atom_name}" SCREAMING_SNAKE_CASE_: List[str] = "" SCREAMING_SNAKE_CASE_: Optional[int] = "" SCREAMING_SNAKE_CASE_: List[str] = 1.0_0 SCREAMING_SNAKE_CASE_: int = atom_name[0] # Protein supports only C, N, O, S, this works. SCREAMING_SNAKE_CASE_: Optional[Any] = "" SCREAMING_SNAKE_CASE_: Dict = "A" if chain_index is not None: SCREAMING_SNAKE_CASE_: int = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! SCREAMING_SNAKE_CASE_: Tuple = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(_UpperCAmelCase ) atom_index += 1 SCREAMING_SNAKE_CASE_: Optional[Any] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: SCREAMING_SNAKE_CASE_: Dict = True SCREAMING_SNAKE_CASE_: List[str] = chain_index[i + 1] if should_terminate: # Close the chain. SCREAMING_SNAKE_CASE_: int = "TER" SCREAMING_SNAKE_CASE_: int = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(_UpperCAmelCase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(_UpperCAmelCase , _UpperCAmelCase ) ) pdb_lines.append("END" ) pdb_lines.append("" ) return "\n".join(_UpperCAmelCase ) def A_ ( _UpperCAmelCase ): return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ): return Protein( aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_UpperCAmelCase , remark=_UpperCAmelCase , parents=_UpperCAmelCase , parents_chain_index=_UpperCAmelCase , )
13
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def lowerCAmelCase_ ( _lowercase : Dict , _lowercase : str , _lowercase : str , _lowercase : Optional[Any]=1024) -> List[Any]: """simple docstring""" a__ , a__ : Optional[int] = [], [] a__ : Union[str, Any] = list(zip(_lowercase , _lowercase)) a__ , a__ : List[Any] = sorted_examples[0] def is_too_big(_lowercase : Tuple): return tok(_lowercase , return_tensors="""pt""").input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:]): a__ : Tuple = new_src + """ """ + src a__ : Any = new_tgt + """ """ + tgt if is_too_big(_lowercase) or is_too_big(_lowercase): # cant fit, finalize example finished_src.append(_lowercase) finished_tgt.append(_lowercase) a__ , a__ : List[Any] = src, tgt else: # can fit, keep adding a__ , a__ : Tuple = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(_lowercase) finished_tgt.append(_lowercase) return finished_src, finished_tgt def lowerCAmelCase_ ( _lowercase : str , _lowercase : Path , _lowercase : Any , _lowercase : str) -> Tuple: """simple docstring""" a__ : Any = Path(_lowercase) save_path.mkdir(exist_ok=_lowercase) for split in ["train"]: a__ , a__ : List[Any] = data_dir / F'''{split}.source''', data_dir / F'''{split}.target''' a__ : Dict = [x.rstrip() for x in Path(_lowercase).open().readlines()] a__ : Optional[Any] = [x.rstrip() for x in Path(_lowercase).open().readlines()] a__ , a__ : List[Any] = pack_examples(_lowercase , _lowercase , _lowercase , _lowercase) print(F'''packed {split} split from {len(_lowercase)} examples -> {len(_lowercase)}.''') Path(save_path / F'''{split}.source''').open("""w""").write("""\n""".join(_lowercase)) Path(save_path / F'''{split}.target''').open("""w""").write("""\n""".join(_lowercase)) for split in ["val", "test"]: a__ , a__ : Any = data_dir / F'''{split}.source''', data_dir / F'''{split}.target''' shutil.copyfile(_lowercase , save_path / F'''{split}.source''') shutil.copyfile(_lowercase , save_path / F'''{split}.target''') def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" a__ : Tuple = argparse.ArgumentParser() parser.add_argument("""--tok_name""" , type=_lowercase , help="""like facebook/bart-large-cnn,t5-base, etc.""") parser.add_argument("""--max_seq_len""" , type=_lowercase , default=128) parser.add_argument("""--data_dir""" , type=_lowercase) parser.add_argument("""--save_path""" , type=_lowercase) a__ : List[Any] = parser.parse_args() a__ : List[Any] = AutoTokenizer.from_pretrained(args.tok_name) return pack_data_dir(_lowercase , Path(args.data_dir) , args.max_seq_len , args.save_path) if __name__ == "__main__": packer_cli()
170
0
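The packing script above greedily concatenates consecutive (source, target) pairs while both sides stay under a token budget. A minimal sketch of that loop, assuming a whitespace word count as a stand-in for the real tokenizer's length function:

def pack_examples(srcs: list[str], tgts: list[str], max_tokens: int = 1024) -> tuple[list[str], list[str]]:
    """Greedily merge consecutive (src, tgt) pairs while both sides fit."""

    def too_big(text: str) -> bool:
        # stand-in for tok(text, return_tensors="pt").input_ids.shape[1]
        return len(text.split()) > max_tokens

    finished_src, finished_tgt = [], []
    new_src, new_tgt = srcs[0], tgts[0]
    for src, tgt in zip(srcs[1:], tgts[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if too_big(cand_src) or too_big(cand_tgt):
            # can't fit: finalize the open example and start a new one
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:
            # still fits: keep growing the open example
            new_src, new_tgt = cand_src, cand_tgt
    # flush the last open example
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


src_packed, tgt_packed = pack_examples(["a b", "c", "d e f"], ["x", "y z", "w"], max_tokens=4)
print(src_packed, tgt_packed)  # ['a b c', 'd e f'] ['x y z', 'w']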
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = {'vocab_file': 'vocab.txt'} __SCREAMING_SNAKE_CASE : Any = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } __SCREAMING_SNAKE_CASE : Any = { 'openbmb/cpm-ant-10b': 1_024, } def _a ( _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = collections.OrderedDict() with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as reader: snake_case_ = reader.readlines() for index, token in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = token.rstrip("""\n""" ) snake_case_ = index return vocab class __A (snake_case__): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict="<unk>" , UpperCAmelCase_ : List[Any]=200 ) ->Any: """simple docstring""" snake_case_ = vocab snake_case_ = unk_token snake_case_ = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = list(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > self.max_input_chars_per_word: return [self.unk_token] snake_case_ = 0 snake_case_ = [] while start < len(UpperCAmelCase_ ): snake_case_ = len(UpperCAmelCase_ ) snake_case_ = None while start < end: snake_case_ = """""".join(chars[start:end] ) if substr in self.vocab: snake_case_ = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(UpperCAmelCase_ ) snake_case_ = end return sub_tokens class __A (snake_case__): '''simple docstring''' __lowercase: List[str] = VOCAB_FILES_NAMES __lowercase: str = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: str = ["""input_ids""", """attention_mask"""] __lowercase: Dict = False def __init__( self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any="<d>" , UpperCAmelCase_ : List[Any]="</d>" , UpperCAmelCase_ : Any="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : List[Any]="<unk>" , UpperCAmelCase_ : Tuple="</n>" , UpperCAmelCase_ : Tuple="</_>" , UpperCAmelCase_ : List[Any]="left" , **UpperCAmelCase_ : Optional[int] , ) ->int: """simple docstring""" requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=UpperCAmelCase_ , eod_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , line_token=UpperCAmelCase_ , space_token=UpperCAmelCase_ , padding_side=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = bod_token snake_case_ = eod_token snake_case_ = load_vocab(UpperCAmelCase_ ) snake_case_ = self.encoder[space_token] snake_case_ = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] snake_case_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCAmelCase_ : x[1] ) ) snake_case_ = {v: k for k, v in self.encoder.items()} snake_case_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowerCAmelCase ( self : Any ) ->int: """simple docstring""" return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : List[Any] ) 
->Union[str, Any]: """simple docstring""" return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" return self.encoder["\n"] @property def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" return len(self.encoder ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int ) ->int: """simple docstring""" snake_case_ = [] for x in jieba.cut(UpperCAmelCase_ , cut_all=UpperCAmelCase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCAmelCase_ ) ) return output_tokens def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ) ->Optional[int]: """simple docstring""" snake_case_ = [i for i in token_ids if i >= 0] snake_case_ = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : str ) ->Tuple: """simple docstring""" return token in self.encoder def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[str] ) ->str: """simple docstring""" return "".join(UpperCAmelCase_ ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : Tuple ) ->int: """simple docstring""" return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : str ) ->Dict: """simple docstring""" return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if os.path.isdir(UpperCAmelCase_ ): snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: snake_case_ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory snake_case_ = 0 if " " in self.encoder: snake_case_ = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: snake_case_ = self.encoder["""\n"""] del self.encoder["\n"] snake_case_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCAmelCase_ : x[1] ) ) with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" """ Please check that the vocabulary is not corrupted!""" ) snake_case_ = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : List[int] = None ) ->List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is not None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) return [1] + ([0] * len(UpperCAmelCase_ ))
233
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> list: snake_case_ = length or len(_SCREAMING_SNAKE_CASE ) snake_case_ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: snake_case_ , snake_case_ = list_data[i + 1], list_data[i] snake_case_ = True return list_data if not swapped else bubble_sort(_SCREAMING_SNAKE_CASE , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
233
1
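A short usage check for the recursive bubble sort above (using the `bubble_sort` name from the cleaned-up listing); the `swapped` flag gives the best-case early exit on already-sorted input.

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
print(bubble_sort([1, 2, 3]))        # sorted input: one pass, no swaps, done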
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __lowerCAmelCase : Dict =version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :tuple , lowerCAmelCase__ :Path , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=False , ) -> Union[str, Any]: '''simple docstring''' output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , ) else: export( lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , ) @torch.no_grad() def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :bool = False ) -> str: '''simple docstring''' lowercase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowercase = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: lowercase = """cpu""" lowercase = Path(lowerCAmelCase__ ) # VAE DECODER lowercase = AutoencoderKL.from_pretrained(model_path + """/vae""" ) lowercase = vae_decoder.config.latent_channels # forward only through the decoder part lowercase = vae_decoder.decode onnx_export( lowerCAmelCase__ , model_args=( torch.randn(1 , lowerCAmelCase__ , 2_5 , 2_5 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowerCAmelCase__ , ) del vae_decoder if __name__ == "__main__": __lowerCAmelCase : Tuple =argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=1_4, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") __lowerCAmelCase : Dict =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("""SD: Done: ONNX""")
197
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Path , lowerCAmelCase__ :str = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :str = None , ) -> Optional[int]: '''simple docstring''' if config_name_or_path is None: lowercase = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: lowercase = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowercase = question_encoder_name_or_path lowercase = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. lowercase = RagConfig.from_pretrained(lowerCAmelCase__ ) lowercase = AutoConfig.from_pretrained(lowerCAmelCase__ ) lowercase = AutoConfig.from_pretrained(lowerCAmelCase__ ) lowercase = gen_config lowercase = question_encoder_config lowercase = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase__ , lowerCAmelCase__ , config=lowerCAmelCase__ ) rag_model.save_pretrained(lowerCAmelCase__ ) # Sanity check. model_class.from_pretrained(lowerCAmelCase__ ) # Save tokenizers. lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": __lowerCAmelCase : int =argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) __lowerCAmelCase : List[str] =parser.parse_args() __lowerCAmelCase : Dict =Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
197
1
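The conversion script above drives torch.onnx.export through a version-aware wrapper. A self-contained toy version of the same call, assuming nothing beyond PyTorch itself; `TinyDecoder` is a stand-in for the VAE decoder, not the real model.

import torch


class TinyDecoder(torch.nn.Module):
    """Toy stand-in for a latent-space decoder: 4 latent channels -> 3 image channels."""

    def __init__(self):
        super().__init__()
        self.deconv = torch.nn.ConvTranspose2d(4, 3, kernel_size=2, stride=2)

    def forward(self, latent):
        return self.deconv(latent)


model = TinyDecoder().eval()
dummy = torch.randn(1, 4, 25, 25)  # (batch, channels, height, width)
torch.onnx.export(
    model,
    (dummy,),
    "tiny_decoder.onnx",
    input_names=["latent_sample"],
    output_names=["sample"],
    # mark batch and spatial dims as dynamic so other sizes work at inference time
    dynamic_axes={"latent_sample": {0: "batch", 2: "height", 3: "width"}},
    opset_version=14,
)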
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Optional[int] = logging.get_logger(__name__) a : List[Any] = { 'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json', 'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json', 'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json', 'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json', 'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json', 'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json', 'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json', 'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json', 'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json', } class _a ( _a ): A = '''xmod''' def __init__(self, SCREAMING_SNAKE_CASE_=30522, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_="absolute", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=("en_XX",), SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> List[Any]: super().__init__(pad_token_id=_a, bos_token_id=_a, eos_token_id=_a, **_a ) UpperCAmelCase_: Union[str, Any] = vocab_size UpperCAmelCase_: Any = hidden_size UpperCAmelCase_: Union[str, Any] = num_hidden_layers UpperCAmelCase_: Any = num_attention_heads UpperCAmelCase_: Tuple = hidden_act UpperCAmelCase_: str = intermediate_size UpperCAmelCase_: Tuple = hidden_dropout_prob UpperCAmelCase_: Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_: Dict = max_position_embeddings UpperCAmelCase_: Dict = type_vocab_size UpperCAmelCase_: Optional[int] = initializer_range UpperCAmelCase_: Dict = layer_norm_eps UpperCAmelCase_: Optional[int] = position_embedding_type UpperCAmelCase_: int = use_cache UpperCAmelCase_: Optional[Any] = classifier_dropout UpperCAmelCase_: List[Any] = pre_norm UpperCAmelCase_: List[str] = adapter_reduction_factor UpperCAmelCase_: int = adapter_layer_norm UpperCAmelCase_: List[Any] = adapter_reuse_layer_norm UpperCAmelCase_: Tuple = ln_before_adapter UpperCAmelCase_: Union[str, Any] = list(_a ) UpperCAmelCase_: Any = default_language class _a ( _a ): @property def __snake_case (self ) -> Tuple: if self.task == "multiple-choice": UpperCAmelCase_: str = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_: List[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
370
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed: list[list[int]], total: int) -> None:
        self.total_tasks = total  # total no. of tasks (N)

        # DP table of dimension (2^M) x (N+1); all values initially set to -1
        self.dp = [[-1 for _ in range(total + 1)] for _ in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask: int, task_no: int) -> int:
        # if mask == self.final_mask, all persons have a task: one valid arrangement
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # return the memoized result if this state was already computed
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the task to each eligible, still-unassigned person and
        # recursively count arrangements for the remaining tasks
        if task_no in self.task:
            for p in self.task[task_no]:
                # skip p if p is already given a task
                if mask & (1 << p):
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # memoize and return
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed: list[list[int]]) -> int:
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no. of tasks (the value of N)

    # the list of tasks that can be done by each of the M persons
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(task_performed))
82
0
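A brute-force cross-check for the bitmask DP above: enumerate every ordered choice of distinct tasks, one per person, and keep those where each person is allowed their task. For the sample input this agrees with the memoized answer, 10. The `count_ways_brute_force` name is illustrative.

from itertools import permutations


def count_ways_brute_force(total_tasks: int, task_performed: list[list[int]]) -> int:
    """Try every assignment of distinct tasks to persons directly."""
    count = 0
    for tasks in permutations(range(1, total_tasks + 1), len(task_performed)):
        if all(t in allowed for t, allowed in zip(tasks, task_performed)):
            count += 1
    return count


print(count_ways_brute_force(5, [[1, 3, 4], [1, 2, 5], [3, 4]]))  # 10, matching the DP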
_lowerCamelCase ="0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
334
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCamelCase =16 _lowerCamelCase =32 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE =load_dataset('glue', 'mrpc' ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE =datasets.map( lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('label', 'labels' ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE =16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE =8 else: SCREAMING_SNAKE_CASE =None return tokenizer.pad( lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE =DataLoader( tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =DataLoader( tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCamelCase =mocked_dataloaders # noqa: F811 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1": SCREAMING_SNAKE_CASE =2 # Initialize accelerator SCREAMING_SNAKE_CASE =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE =config['lr'] SCREAMING_SNAKE_CASE =int(config['num_epochs'] ) SCREAMING_SNAKE_CASE =int(config['seed'] ) SCREAMING_SNAKE_CASE =int(config['batch_size'] ) SCREAMING_SNAKE_CASE =evaluate.load('glue', 'mrpc' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE =AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE =model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE =AdamW(params=model.parameters(), lr=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ ) # Instantiate scheduler SCREAMING_SNAKE_CASE =get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.prepare( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowerCAmelCase_, references=lowerCAmelCase_, ) SCREAMING_SNAKE_CASE =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:', lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.', ) parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' ) SCREAMING_SNAKE_CASE =parser.parse_args() SCREAMING_SNAKE_CASE ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowerCAmelCase_, lowerCAmelCase_ ) if __name__ == "__main__": main()
334
1
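The training script above relies on a decorator that retries the inner loop with smaller batch sizes when memory runs out. A minimal sketch of that retry pattern; the real accelerate implementation also frees CUDA caches and recognizes more failure modes, and the `retry_with_smaller_batch` name is an assumption for illustration only.

import functools


def retry_with_smaller_batch(starting_batch_size: int = 128):
    """Decorator sketch: call fn(batch_size, ...), halving the batch size
    whenever an out-of-memory RuntimeError escapes, until one size works."""

    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise  # unrelated failure: re-raise unchanged
                    batch_size //= 2  # OOM: try again with half the batch
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator


@retry_with_smaller_batch(starting_batch_size=64)
def train(batch_size: int) -> int:
    if batch_size > 16:  # pretend anything above 16 overflows memory
        raise RuntimeError("CUDA out of memory")
    return batch_size


print(train())  # 16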
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if
        `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in
        case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       normalized=True,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
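# Illustrative sketch (not part of the dataset row above): computing TER with
# the metric class defined there. Assumes a `datasets` version that still
# exposes `load_metric` and an installed `sacrebleu>=1.4.12`.
import datasets

ter = datasets.load_metric("ter")
results = ter.compute(
    predictions=["does this sentence match??"],
    references=[["does this sentence match", "does this sentence match!?!"]],
)
print(results)  # {'score': ..., 'num_edits': ..., 'ref_length': ...}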
362
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
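# Illustrative sketch (not part of the dataset row above): invoking the
# pipeline class defined there through `pipeline()`. The CLAP checkpoint name
# and the audio file path are assumptions for illustration.
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
)
preds = classifier(
    "dog_bark.wav",
    candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
)
print(preds)  # list of {'score': ..., 'label': ...} dicts sorted by score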
129
0
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = (UniPCMultistepScheduler,) SCREAMING_SNAKE_CASE_ = (("num_inference_steps", 2_5),) def a_ ( self, **lowerCAmelCase__) -> List[Any]: snake_case_ = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'solver_type': 'bh2', } config.update(**lowerCAmelCase__) return config def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> Any: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__) snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__) new_scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ , snake_case_ = sample, sample for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1): snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> Dict: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals (must be after setting timesteps) snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__) snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residual (must be after setting timesteps) snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def a_ ( self, lowerCAmelCase__=None, **lowerCAmelCase__) -> List[Any]: if scheduler is None: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = 
scheduler_class(**lowerCAmelCase__) snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__) for i, t in enumerate(scheduler.timesteps): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample return sample def a_ ( self) -> List[str]: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCAmelCase__, 'set_timesteps'): scheduler.set_timesteps(lowerCAmelCase__) elif num_inference_steps is not None and not hasattr(lowerCAmelCase__, 'set_timesteps'): snake_case_ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] snake_case_ = scheduler.timesteps[5] snake_case_ = scheduler.timesteps[6] snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample self.assertEqual(output_a.shape, sample.shape) self.assertEqual(output_a.shape, output_a.shape) def a_ ( self) -> Tuple: # make sure that iterating over schedulers with same config names gives same results # for defaults snake_case_ = UniPCMultistepScheduler(**self.get_scheduler_config()) snake_case_ = self.full_loop(scheduler=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2464) < 1e-3 snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) snake_case_ = DEISMultistepScheduler.from_config(scheduler.config) snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config) snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config) snake_case_ = self.full_loop(scheduler=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2464) < 1e-3 def a_ ( self) -> str: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__) def a_ ( self) -> List[Any]: self.check_over_configs(thresholding=lowerCAmelCase__) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCAmelCase__, prediction_type=lowerCAmelCase__, sample_max_value=lowerCAmelCase__, solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, ) def a_ ( self) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__) def a_ ( self) -> Tuple: for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, ) snake_case_ = self.full_loop( solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, 
prediction_type=lowerCAmelCase__, ) assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers" def a_ ( self) -> Any: self.check_over_configs(lower_order_final=lowerCAmelCase__) self.check_over_configs(lower_order_final=lowerCAmelCase__) def a_ ( self) -> int: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0) def a_ ( self) -> Tuple: snake_case_ = self.full_loop() snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2464) < 1e-3 def a_ ( self) -> str: snake_case_ = self.full_loop(prediction_type='v_prediction') snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.1014) < 1e-3 def a_ ( self) -> Dict: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCAmelCase__) for i, t in enumerate(scheduler.timesteps): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample assert sample.dtype == torch.floataa def a_ ( self, **lowerCAmelCase__) -> str: for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(scheduler.config.num_train_timesteps) assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
69
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
22
0
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart,
    barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird,
    bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom,
    bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen,
    conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec,
    deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta, detr,
    dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra,
    encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet,
    fsmt, funnel, git, glpn, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3,
    gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer,
    instructblip, jukebox, layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt,
    llama, longformer, longt5, luke, lxmert, m2m_100, marian, markuplm, mask2former,
    maskformer, mbart, mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke,
    mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5,
    musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai,
    opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pix2struct, plbart, poolformer,
    prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta,
    roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d,
    speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter,
    squeezebert, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5,
    table_transformer, tapas, time_series_transformer, timesformer, timm_backbone,
    transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae, vilt,
    vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae,
    vit_msn, vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme, wav2vec2_with_lm, wavlm,
    whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod,
    yolos, yoso,
)
355
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
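# Quick sanity check for `sum_of_divisors`, using the smallest amicable pair
# (220, 284): the proper divisors of each number sum to the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220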
197
0
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
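# Quick examples for `twin_prime` (name as restored above): 5 and 7 form a
# twin-prime pair, while 4 is not prime at all. Assumes
# `maths.prime_check.is_prime` behaves as its name suggests.
assert twin_prime(5) == 7
assert twin_prime(4) == -1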
298
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Any = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Dict = {} __UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: __UpperCamelCase : Any = json.load(snake_case__ ) else: raise ValueError(F"can't find {path}" ) return results def __lowerCAmelCase ( ): __UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @classmethod def a_ (cls ) -> Union[str, Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __UpperCamelCase : Optional[Any] = tempfile.mkdtemp() __UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def a_ (cls ) -> Union[str, Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 2_8 ) self.assertGreaterEqual(result["eval_exact"] , 2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : str = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Dict = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 1_0 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_bleu"] , 3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : str = get_results(_UpperCAmelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
298
1
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def UpperCAmelCase ( ): '''simple docstring''' lowerCamelCase : List[str] = HfArgumentParser(a_ ) lowerCamelCase : List[Any] = parser.parse_args_into_dataclasses()[0] lowerCamelCase : str = TensorFlowBenchmark(args=a_ ) try: lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowerCamelCase : Optional[Any] = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' lowerCamelCase : int = ' '.join(str(a_ ).split(' ' )[:-1] ) lowerCamelCase : List[str] = '' lowerCamelCase : Dict = eval(str(a_ ).split(' ' )[-1] ) lowerCamelCase : Dict = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(a_ ) if len(a_ ) > 0: lowerCamelCase : str = full_error_msg + begin_error_msg + str(a_ ) raise ValueError(a_ ) benchmark.run() if __name__ == "__main__": main()
205
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _lowercase ( ctypes.Structure ): # _fields is a specific attr expected by ctypes lowercase_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)] def UpperCAmelCase ( ): '''simple docstring''' if os.name == "nt": lowerCamelCase : Optional[int] = CursorInfo() lowerCamelCase : Union[str, Any] = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(a_, ctypes.byref(a_ ) ) lowerCamelCase : Dict = False ctypes.windll.kernelaa.SetConsoleCursorInfo(a_, ctypes.byref(a_ ) ) elif os.name == "posix": sys.stdout.write('\033[?25l' ) sys.stdout.flush() def UpperCAmelCase ( ): '''simple docstring''' if os.name == "nt": lowerCamelCase : List[str] = CursorInfo() lowerCamelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(a_, ctypes.byref(a_ ) ) lowerCamelCase : Optional[Any] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(a_, ctypes.byref(a_ ) ) elif os.name == "posix": sys.stdout.write('\033[?25h' ) sys.stdout.flush() @contextmanager def UpperCAmelCase ( ): '''simple docstring''' try: hide_cursor() yield finally: show_cursor()
205
1
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", UpperCAmelCase_, ) class lowercase__ ( UpperCAmelCase_ ): _UpperCAmelCase :Optional[int] = RobertaConfig _UpperCAmelCase :List[str] = """roberta""" def __init__( self : int , snake_case__ : Union[str, Any] ): super().__init__(__lowercase ) lowerCamelCase_ : Dict =RobertaEmbeddings(__lowercase ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", UpperCAmelCase_, ) class lowercase__ ( UpperCAmelCase_ ): _UpperCAmelCase :List[Any] = RobertaConfig _UpperCAmelCase :str = """roberta""" def __init__( self : Optional[Any] , snake_case__ : Any ): super().__init__(__lowercase ) lowerCamelCase_ : List[str] =config.num_labels lowerCamelCase_ : List[Any] =config.num_hidden_layers lowerCamelCase_ : Optional[Any] =DeeRobertaModel(__lowercase ) lowerCamelCase_ : Any =nn.Dropout(config.hidden_dropout_prob ) lowerCamelCase_ : str =nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(__lowercase ) def UpperCAmelCase__ ( self : Dict , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Dict=None , snake_case__ : Optional[Any]=None , snake_case__ : Tuple=-1 , snake_case__ : int=False , ): lowerCamelCase_ : List[str] =self.num_layers try: lowerCamelCase_ : Tuple =self.roberta( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , ) lowerCamelCase_ : int =outputs[1] lowerCamelCase_ : Optional[int] =self.dropout(__lowercase ) lowerCamelCase_ : Any =self.classifier(__lowercase ) lowerCamelCase_ : Tuple =(logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: lowerCamelCase_ : Optional[Any] =e.message lowerCamelCase_ : List[str] =e.exit_layer lowerCamelCase_ : Any =outputs[0] if not self.training: lowerCamelCase_ : Optional[Any] =entropy(__lowercase ) lowerCamelCase_ : Dict =[] lowerCamelCase_ : int =[] if labels is not None: if self.num_labels == 1: # We are doing regression lowerCamelCase_ : Dict =MSELoss() lowerCamelCase_ : str =loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: lowerCamelCase_ : Optional[Any] =CrossEntropyLoss() lowerCamelCase_ : Any =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits lowerCamelCase_ : List[Any] =[] for highway_exit in outputs[-1]: lowerCamelCase_ : Any =highway_exit[0] if not self.training: highway_logits_all.append(__lowercase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression lowerCamelCase_ : Any =MSELoss() lowerCamelCase_ : Optional[Any] =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: lowerCamelCase_ : Union[str, Any] 
=CrossEntropyLoss() lowerCamelCase_ : Union[str, Any] =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__lowercase ) if train_highway: lowerCamelCase_ : Any =(sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: lowerCamelCase_ : Union[str, Any] =(loss,) + outputs if not self.training: lowerCamelCase_ : Tuple =outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: lowerCamelCase_ : List[str] =( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
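# Illustrative sketch (not part of the dataset row above): instantiating the
# early-exit classifier defined there from a stock RoBERTa configuration. The
# checkpoint name and label count are assumptions; the weights here are
# randomly initialized rather than pretrained.
from transformers import RobertaConfig

config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
model = DeeRobertaForSequenceClassification(config)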
144
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) __lowercase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for attribute in key.split('''.''' ): __UpperCamelCase :str = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: __UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: __UpperCamelCase :Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __UpperCamelCase :str = value elif weight_type == "weight_g": __UpperCamelCase :List[str] = value elif weight_type == "weight_v": __UpperCamelCase :str = value elif weight_type == "bias": __UpperCamelCase :Union[str, Any] = value else: __UpperCamelCase :str = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[Any] = [] __UpperCamelCase :int = fairseq_model.state_dict() __UpperCamelCase :List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase :List[Any] = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , ) __UpperCamelCase :List[str] = True else: for key, mapped_key in MAPPING.items(): __UpperCamelCase :Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __UpperCamelCase :Optional[Any] = True if "*" in mapped_key: __UpperCamelCase :List[str] = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2] __UpperCamelCase :Optional[int] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: __UpperCamelCase :int = '''weight_g''' elif "weight_v" in name: __UpperCamelCase :List[Any] = '''weight_v''' elif "weight" in name: __UpperCamelCase :Dict = '''weight''' elif "bias" in name: __UpperCamelCase :Dict = '''bias''' else: __UpperCamelCase :Dict = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = full_name.split('''conv_layers.''' )[-1] __UpperCamelCase :Optional[int] = name.split('''.''' ) __UpperCamelCase :str = int(items[0] ) __UpperCamelCase :List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __UpperCamelCase :Dict = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __UpperCamelCase :Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __UpperCamelCase :int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __UpperCamelCase :Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ): '''simple docstring''' if config_path is not None: __UpperCamelCase :Tuple = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :Optional[int] = HubertConfig() if is_finetuned: if dict_path: __UpperCamelCase :Optional[int] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase :Optional[int] = target_dict.pad_index __UpperCamelCase :Dict = target_dict.bos_index __UpperCamelCase :str = target_dict.eos_index __UpperCamelCase :Dict = len(target_dict.symbols ) __UpperCamelCase :List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False __UpperCamelCase :Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[str] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :str = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __UpperCamelCase :Dict = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, 
type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __lowercase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
43
0
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
277
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
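# Check against the widely published Adler-32 test vector:
# adler32("Wikipedia") == 0x11E60398 == 300286872.
assert adler32("Wikipedia") == 300286872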
277
1
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Any: super().__init__() if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( speech_model=UpperCamelCase__ , speech_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ = "auto" ) -> Optional[Any]: if slice_size == "auto": lowerCamelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCamelCase__ ) def _lowercase ( self ) -> Union[str, Any]: self.enable_attention_slicing(UpperCamelCase__ ) @torch.no_grad() def __call__( self , UpperCamelCase__ , UpperCamelCase__=1_6000 , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> str: lowerCamelCase : Dict = self.speech_processor.feature_extractor( UpperCamelCase__ , return_tensors="pt" , sampling_rate=UpperCamelCase__ ).input_features.to(self.device ) lowerCamelCase : Any = self.speech_model.generate(UpperCamelCase__ , max_length=48_0000 ) lowerCamelCase : List[str] = self.speech_processor.tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , normalize=UpperCamelCase__ )[ 0 ] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase : str = 1 elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase : Any = len(UpperCamelCase__ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not 
isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(UpperCamelCase__ )}.''' ) # get prompt text embeddings lowerCamelCase : Union[str, Any] = self.tokenizer( UpperCamelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) lowerCamelCase : List[str] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowerCamelCase : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) lowerCamelCase : Any = text_input_ids[:, : self.tokenizer.model_max_length] lowerCamelCase : Any = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = text_embeddings.shape lowerCamelCase : Dict = text_embeddings.repeat(1 , UpperCamelCase__ , 1 ) lowerCamelCase : int = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase__ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCamelCase : Union[str, Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCamelCase : List[str] if negative_prompt is None: lowerCamelCase : Dict = [""] * batch_size elif type(UpperCamelCase__ ) is not type(UpperCamelCase__ ): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__ )} !=''' F''' {type(UpperCamelCase__ )}.''' ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = [negative_prompt] elif batch_size != len(UpperCamelCase__ ): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__ )}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: lowerCamelCase : Tuple = negative_prompt lowerCamelCase : Any = text_input_ids.shape[-1] lowerCamelCase : Dict = self.tokenizer( UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" , ) lowerCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCamelCase : Optional[Any] = uncond_embeddings.shape[1] lowerCamelCase : Dict = uncond_embeddings.repeat(1 , UpperCamelCase__ , 1 ) lowerCamelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCamelCase : Dict = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. 
# However this currently doesn't work in `mps`. lowerCamelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowerCamelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowerCamelCase : Optional[int] = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="cpu" , dtype=UpperCamelCase__ ).to( self.device ) else: lowerCamelCase : Dict = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) lowerCamelCase : List[str] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCamelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowerCamelCase : Any = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase : Dict = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase : int = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase : List[str] = {} if accepts_eta: lowerCamelCase : Tuple = eta for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ): # expand the latents if we are doing classifier free guidance lowerCamelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase : List[Any] = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ ) # predict the noise residual lowerCamelCase : Optional[int] = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample # perform guidance if do_classifier_free_guidance: lowerCamelCase , lowerCamelCase : Tuple = noise_pred.chunk(2 ) lowerCamelCase : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase : Optional[int] = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[str] = 1 / 0.18215 * latents lowerCamelCase : List[Any] = self.vae.decode(UpperCamelCase__ ).sample lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCamelCase : Optional[int] = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return image return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__ )
48
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
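A quick sanity check of the search above (a minimal sketch against the restored names; the expected indices follow from the demo string in the snippet itself):

# 'ABC' occurs at 0-indexed positions 4, 10 and 18 in the demo string.
assert naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC') == [4, 10, 18]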
48
1
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt') UpperCAmelCase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) UpperCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' with open(__lowerCAmelCase , """rb""" ) as f: __lowerCamelCase = Image.open(__lowerCAmelCase ) return im.convert("""RGB""" ) @dataclass class lowerCamelCase__: UpperCAmelCase__ : Optional[str] = field( default=lowerCamelCase__ , metadata={ 'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).' } , ) UpperCAmelCase__ : Optional[str] = field( default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) UpperCAmelCase__ : Optional[str] = field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the training data.'}) UpperCAmelCase__ : Optional[str] = field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the validation data.'}) UpperCAmelCase__ : Optional[float] = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'}) UpperCAmelCase__ : Optional[int] = field( default=lowerCamelCase__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) UpperCAmelCase__ : Optional[int] = field( default=lowerCamelCase__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) } , ) def lowerCAmelCase__ ( self: Any ): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( """You must specify either a dataset name from the hub or a train and/or validation directory.""" ) @dataclass class lowerCamelCase__: UpperCAmelCase__ : str = field( default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , ) UpperCAmelCase__ : Optional[str] = field( default=lowerCamelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCamelCase__)} , ) UpperCAmelCase__ : Optional[str] = field( default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'}) UpperCAmelCase__ : Optional[str] = field( default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}) UpperCAmelCase__ : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) UpperCAmelCase__ : str = field(default=lowerCamelCase__ , metadata={'help': 'Name or path of preprocessor config.'}) UpperCAmelCase__ : bool = field( default=lowerCamelCase__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) UpperCAmelCase__ : bool = field( default=lowerCamelCase__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = torch.stack([example["""pixel_values"""] for example in examples] ) __lowerCamelCase = torch.tensor([example["""labels"""] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_image_classification""" , __lowerCAmelCase , __lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __lowerCamelCase = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowerCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: __lowerCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , ) else: __lowerCamelCase = {} if data_args.train_dir is not None: __lowerCamelCase = os.path.join(data_args.train_dir , """**""" ) if data_args.validation_dir is not None: __lowerCamelCase = os.path.join(data_args.validation_dir , """**""" ) __lowerCamelCase = load_dataset( """imagefolder""" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="""image-classification""" , ) # If we don't have a validation split, split off a percentage of train as validation. __lowerCamelCase = None if '''validation''' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0: __lowerCamelCase = dataset['''train'''].train_test_split(data_args.train_val_split ) __lowerCamelCase = split['''train'''] __lowerCamelCase = split['''test'''] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. __lowerCamelCase = dataset['''train'''].features['''labels'''].names __lowerCamelCase = {}, {} for i, label in enumerate(__lowerCAmelCase ): __lowerCamelCase = str(__lowerCAmelCase ) __lowerCamelCase = label # Load the accuracy metric from the datasets package __lowerCamelCase = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(A__ : Dict ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCamelCase = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) __lowerCamelCase = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: __lowerCamelCase = image_processor.size['''shortest_edge'''] else: __lowerCamelCase = (image_processor.size['''height'''], image_processor.size['''width''']) __lowerCamelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) __lowerCamelCase = Compose( [ RandomResizedCrop(__lowerCAmelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) __lowerCamelCase = Compose( [ Resize(__lowerCAmelCase ), CenterCrop(__lowerCAmelCase ), ToTensor(), normalize, ] ) def train_transforms(A__ : Any ): __lowerCamelCase = [ _train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch['''image'''] ] return example_batch def val_transforms(A__ : List[str] ): __lowerCamelCase = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch['''image''']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: __lowerCamelCase = ( dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__lowerCAmelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: __lowerCamelCase = ( dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__lowerCAmelCase ) # Initalize our trainer __lowerCamelCase = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: __lowerCamelCase = None if training_args.resume_from_checkpoint is not None: __lowerCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCamelCase = last_checkpoint __lowerCamelCase = trainer.train(resume_from_checkpoint=__lowerCAmelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) 
trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __lowerCamelCase = trainer.evaluate() trainer.log_metrics("""eval""" , __lowerCAmelCase ) trainer.save_metrics("""eval""" , __lowerCAmelCase ) # Write model card and (optionally) push to hub __lowerCamelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''image-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''image-classification''', '''vision'''], } if training_args.push_to_hub: trainer.push_to_hub(**__lowerCAmelCase ) else: trainer.create_model_card(**__lowerCAmelCase ) if __name__ == "__main__": main()
361
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Count the square laminae that can be formed using up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
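As a sketch of why the closed-form count is right, the brute force below (my own check, not part of the original row) enumerates outer/hole width pairs directly and should agree with `solution` for small tile budgets:

def brute_force_count(limit: int) -> int:
    # A lamina with outer width o and hole width h (same parity, o >= h + 2)
    # uses o*o - h*h tiles; count every pair that fits in the budget.
    count = 0
    for o in range(3, limit // 4 + 2):
        for h in range(o - 2, 0, -2):  # shrinking the hole uses more tiles
            if o * o - h * h > limit:
                break
            count += 1
    return count


assert brute_force_count(100) == solution(100)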
29
0
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) a_ = 'pytorch_model.bin' a_ = 'pytorch_model.bin.index.json' a_ = 'adapter_config.json' a_ = 'adapter_model.bin' a_ = 'adapter_model.safetensors' a_ = 'tf_model.h5' a_ = 'tf_model.h5.index.json' a_ = 'model.ckpt' a_ = 'flax_model.msgpack' a_ = 'flax_model.msgpack.index.json' a_ = 'model.safetensors' a_ = 'model.safetensors.index.json' a_ = 'config.json' a_ = 'preprocessor_config.json' a_ = FEATURE_EXTRACTOR_NAME a_ = 'generation_config.json' a_ = 'modelcard.json' a_ = '▁' a_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility a_ = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. a_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] a_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def lowerCamelCase__ ( _a): if version.parse(_a) < version.parse(_a): if "dev" in min_version: SCREAMING_SNAKE_CASE : Optional[Any] = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: SCREAMING_SNAKE_CASE : List[Any] = f"This example requires a minimum version of {min_version}," error_message += f" but the version found is {__version__}.\n" raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers.")
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
0
"""simple docstring""" import pprint import requests lowerCAmelCase__ = '''https://zenquotes.io/api''' def a__ ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + "/today" ).json() def a__ ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + "/random" ).json() if __name__ == "__main__": lowerCAmelCase__ = random_quotes() pprint.pprint(response)
357
"""simple docstring""" from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Dict ="autoformer" a : Dict ={ "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = [1, 2, 3, 4, 5, 6, 7] , snake_case__ = True , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__ = True , snake_case__=True , snake_case__ = 10 , snake_case__ = 25 , snake_case__ = 3 , **snake_case__ , ): """simple docstring""" lowerCAmelCase : Any = prediction_length lowerCAmelCase : Dict = context_length if context_length is not None else prediction_length lowerCAmelCase : Tuple = distribution_output lowerCAmelCase : List[Any] = loss lowerCAmelCase : int = input_size lowerCAmelCase : str = num_time_features lowerCAmelCase : str = lags_sequence lowerCAmelCase : List[str] = scaling lowerCAmelCase : List[Any] = num_dynamic_real_features lowerCAmelCase : Tuple = num_static_real_features lowerCAmelCase : Dict = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(snake_case__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) lowerCAmelCase : Any = cardinality else: lowerCAmelCase : Union[str, Any] = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(snake_case__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) lowerCAmelCase : Tuple = embedding_dimension else: lowerCAmelCase : Any = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCAmelCase : Any = num_parallel_samples # Transformer architecture configuration lowerCAmelCase : str = input_size * len(self.lags_sequence ) + self._number_of_features lowerCAmelCase : Any = d_model lowerCAmelCase : List[str] = encoder_attention_heads lowerCAmelCase : Union[str, Any] = decoder_attention_heads lowerCAmelCase : Optional[int] = encoder_ffn_dim lowerCAmelCase : Optional[Any] = decoder_ffn_dim lowerCAmelCase : int = encoder_layers lowerCAmelCase : int = decoder_layers lowerCAmelCase : List[Any] = dropout lowerCAmelCase : Optional[int] = attention_dropout lowerCAmelCase : Union[str, Any] = activation_dropout lowerCAmelCase : Optional[int] = encoder_layerdrop lowerCAmelCase : Dict = decoder_layerdrop lowerCAmelCase : Tuple = activation_function lowerCAmelCase : Optional[Any] = init_std lowerCAmelCase : List[Any] = use_cache # Autoformer lowerCAmelCase : Any = label_length lowerCAmelCase : Any = moving_average lowerCAmelCase : Optional[Any] = autocorrelation_factor 
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ ) @property def lowercase__ ( self ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
133
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = SwinConfig(image_size=1_92 ) if "base" in model_name: __SCREAMING_SNAKE_CASE = 6 __SCREAMING_SNAKE_CASE = 1_28 __SCREAMING_SNAKE_CASE = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE = (4, 8, 16, 32) elif "large" in model_name: __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 1_92 __SCREAMING_SNAKE_CASE = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE = (6, 12, 24, 48) else: raise ValueError("""Model not supported, only supports base and large variants""" ) __SCREAMING_SNAKE_CASE = window_size __SCREAMING_SNAKE_CASE = embed_dim __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = num_heads return config def a__ ( a__ ): """simple docstring""" if "encoder.mask_token" in name: __SCREAMING_SNAKE_CASE = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" ) if "encoder.patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "encoder.patch_embed.norm" in name: __SCREAMING_SNAKE_CASE = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": __SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "encoder.norm.bias": __SCREAMING_SNAKE_CASE = """layernorm.bias""" if "decoder" in name: pass else: __SCREAMING_SNAKE_CASE = """swin.""" + name return name def a__ ( a__ , a__ ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(a__ ) if "attn_mask" in key: pass elif "qkv" in key: __SCREAMING_SNAKE_CASE = key.split(""".""" ) __SCREAMING_SNAKE_CASE = int(key_split[2] ) __SCREAMING_SNAKE_CASE = int(key_split[4] ) __SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val[ :dim ] __SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE = val[ -dim: ] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def a__ ( a__ , a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = torch.load(a__ , map_location="""cpu""" )["""model"""] __SCREAMING_SNAKE_CASE = get_swin_config(a__ ) __SCREAMING_SNAKE_CASE = SwinForMaskedImageModeling(a__ ) model.eval() __SCREAMING_SNAKE_CASE = convert_state_dict(a__ , a__ ) model.load_state_dict(a__ ) __SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" __SCREAMING_SNAKE_CASE = ViTImageProcessor(size={"""height""": 1_92, """width""": 1_92} ) __SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ) __SCREAMING_SNAKE_CASE = image_processor(images=a__ , 
return_tensors="""pt""" ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**a__ ).logits print(outputs.keys() ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(a__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(a__ ) if push_to_hub: print(F'Pushing model and image processor for {model_name} to hub' ) model.push_to_hub(F'microsoft/{model_name}' ) image_processor.push_to_hub(F'microsoft/{model_name}' ) if __name__ == "__main__": UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase : Tuple = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
267
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    # A custom formatter that removes the "<command> [<args>]" stub from usage lines.
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
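A small usage sketch of the converter helpers restored above (the helper names are my reconstruction of the mangled originals, so treat them as assumptions):

# Menu answers arrive as strings; the converters map them to typed values.
assert _convert_yes_no_to_bool("YES") is True   # the lookup is case-insensitive
assert _convert_yes_no_to_bool("no") is False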
267
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowercase__ ='bert-base-cased' lowercase__ ='google/pegasus-xsum' lowercase__ =[' Sam ate lunch today.', 'Sams lunch ingredients.'] lowercase__ =['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] lowercase__ ='patrickvonplaten/t5-tiny-random' lowercase__ ='sshleifer/bart-tiny-random' lowercase__ ='sshleifer/tiny-mbart' lowercase__ ='sshleifer/tiny-marian-en-de' def __UpperCamelCase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : list ): __a : List[Any] = '''\n'''.join(lowerCAmelCase__ ) Path(lowerCAmelCase__ ).open('''w''' ).writelines(lowerCAmelCase__ ) def __UpperCamelCase ( lowerCAmelCase__ : int ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.source" ) , lowerCAmelCase__ ) _dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.target" ) , lowerCAmelCase__ ) return tmp_dir class UpperCamelCase__ ( __lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCAmelCase (self : int , snake_case_ : int ): __a : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) __a : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __a : Union[str, Any] = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES ) __a : str = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES ) __a : str = 4 __a : Dict = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated __a , __a : Any = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. __a : List[Any] = SeqaSeqDataset( snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , ) __a : Dict = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(snake_case_ , snake_case_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place __a : Dict = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCAmelCase (self : Optional[Any] , snake_case_ : str ): __a : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ ) __a : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES ) __a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES ) __a : Dict = 4 __a : Optional[int] = LegacySeqaSeqDataset( snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=2_0 , max_target_length=snake_case_ , ) __a : Optional[Any] = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCAmelCase (self : List[str] ): __a : int = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) __a : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) __a : Optional[int] = tmp_dir.joinpath('''train.source''' ).open().readlines() __a : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(snake_case_ , snake_case_ , 1_2_8 , snake_case_ ) __a : Optional[Any] = {x.name for x in tmp_dir.iterdir()} __a : Union[str, Any] = {x.name for x in save_dir.iterdir()} __a : str = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(snake_case_ ) < len(snake_case_ ) assert len(snake_case_ ) == 1 assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def lowerCAmelCase (self : Any ): if not FAIRSEQ_AVAILABLE: return __a , __a , __a : Any = self._get_dataset(max_len=6_4 ) __a : int = 6_4 __a : List[str] = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ ) __a : List[str] = [len(snake_case_ ) for x in batch_sampler] assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples __a : Union[str, Any] = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 ) __a : Tuple = [] __a : Union[str, Any] = [] for batch in data_loader: __a : Any = batch['''input_ids'''].shape __a : str = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < 
required_batch_size_multiple __a : Optional[Any] = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(snake_case_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(snake_case_ ) assert num_src_per_batch[0] == max(snake_case_ ) if failures: raise AssertionError(f"too many tokens in {len(snake_case_ )} batches" ) def lowerCAmelCase (self : int ): __a , __a , __a : Optional[int] = self._get_dataset(max_len=5_1_2 ) __a : Union[str, Any] = 2 __a : str = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ ) __a : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 ) __a : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ ) __a : Optional[int] = tokenizer.pad_token_id def count_pad_tokens(snake_case_ : Union[str, Any] , snake_case_ : List[str]="input_ids" ): return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(snake_case_ , k='''labels''' ) ) < sum(count_pad_tokens(snake_case_ , k='''labels''' ) ) assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) ) assert len(snake_case_ ) == len(snake_case_ ) def lowerCAmelCase (self : int , snake_case_ : int=1_0_0_0 , snake_case_ : Optional[Any]=1_2_8 ): if os.getenv('''USE_REAL_DATA''' , snake_case_ ): __a : Optional[int] = '''examples/seq2seq/wmt_en_ro''' __a : List[Any] = max_len * 2 * 6_4 if not Path(snake_case_ ).joinpath('''train.len''' ).exists(): save_len_file(snake_case_ , snake_case_ ) else: __a : int = '''examples/seq2seq/test_data/wmt_en_ro''' __a : List[str] = max_len * 4 save_len_file(snake_case_ , snake_case_ ) __a : str = AutoTokenizer.from_pretrained(snake_case_ ) __a : Optional[int] = SeqaSeqDataset( snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , ) return ds, max_tokens, tokenizer def lowerCAmelCase (self : List[str] ): __a , __a , __a : str = self._get_dataset() __a : Optional[Any] = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) ) __a : Tuple = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) ) assert idsa.intersection(snake_case_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCAmelCase (self : str , snake_case_ : Union[str, Any] ): __a : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ ) if tok_name == MBART_TINY: __a : Any = SeqaSeqDataset( snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) __a : Tuple = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: __a : Optional[Any] = SeqaSeqDataset( snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) __a : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
90
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num == 0:  # exit cleanly instead of raising ValueError on 0
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
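A worked unwinding of the recursion (a sketch; `isclose` guards against float rounding):

from math import isclose

# gamma(3.5) = 2.5 * gamma(2.5) = 2.5 * 1.5 * gamma(1.5)
#            = 2.5 * 1.5 * 0.5 * gamma(0.5) = 1.875 * sqrt(pi)
assert isclose(gamma(3.5), 1.875 * sqrt(pi))
assert gamma(5) == 24.0  # gamma(n) == (n - 1)! for positive integers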
90
1
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
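A brief usage check (my own sketch, not part of the original row):

# Terms come back as strings, with the first term rendered as '1'.
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
assert harmonic_series("") == []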
208
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
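A couple of round-trip checks (a sketch):

# '111100' -> groups '111' and '100' -> digits '7' and '4'
assert bin_to_octal("111100") == "74"
# '1010' is left-padded to '001010' -> '12'; 0o12 == 0b1010 == 10
assert int(bin_to_octal("1010"), 8) == 0b1010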
208
1
def min_path_sum(grid: list) -> int:
    """Return the cheapest top-left to bottom-right path cost, moving right/down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
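A minimal usage check (a sketch using the classic right/down example; note the function mutates its input grid in place):

grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(grid) == 7  # path 1 -> 3 -> 1 -> 1 -> 1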
146
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
146
1
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
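Two quick checks (a sketch; the first few primes pin down the 1-based indexing):

assert solution(1) == 2
assert solution(6) == 13  # the 6th prime: 2, 3, 5, 7, 11, 13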
233
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap arr[i] and arr[k - 1]
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap arr[0] and arr[k - 1]
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
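A small correctness check (a sketch; for three elements the algorithm must emit all 3! orderings exactly once):

perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1),
}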
233
1
"""simple docstring""" def __lowerCamelCase ( a_ : int ) -> "list[int]": if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) __SCREAMING_SNAKE_CASE :Tuple = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 __SCREAMING_SNAKE_CASE :Tuple = 1 if upper_limit > 0: __SCREAMING_SNAKE_CASE :Optional[Any] = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(a_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("\n********* Catalan Numbers Using Dynamic Programming ************\n") print("\n*** Enter -1 at any time to quit ***") print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="") try: while True: lowerCamelCase_ = int(input().strip()) if N < 0: print("\n********* Goodbye!! ************") break else: print(f'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print("Try another upper limit for the sequence: ", end="") except (NameError, ValueError): print("\n********* Invalid input, goodbye! ************\n") import doctest doctest.testmod()
239
"""simple docstring""" import math import unittest def __lowerCamelCase ( a_ : int ) -> bool: assert isinstance(a_ , a_ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(a_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class _SCREAMING_SNAKE_CASE( unittest.TestCase ): def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def _UpperCamelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,) self.assertFalse( is_prime(1 ) ,'''One only has 1 positive factor, primes must have exactly two.''' ,) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
239
1
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowercase ( A_="" )-> str: '''simple docstring''' a : Tuple = tempfile.mkdtemp() return os.path.join(A_ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[Any]): a : Union[str, Any] = torch.rand(12 , dtype=torch.floataa) - 0.5 a : Any = AgentAudio(__UpperCAmelCase) a : List[str] = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__UpperCAmelCase , agent_type.to_raw() , atol=1e-4)) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(__UpperCAmelCase)) # Ensure that the file contains the same value as the original tensor a , a : Union[str, Any] = sf.read(__UpperCAmelCase) self.assertTrue(torch.allclose(__UpperCAmelCase , torch.tensor(__UpperCAmelCase) , atol=1e-4)) def __snake_case ( self : Optional[Any]): a : str = torch.rand(12 , dtype=torch.floataa) - 0.5 a : int = get_new_path(suffix=".wav") sf.write(__UpperCAmelCase , __UpperCAmelCase , 16000) a : List[str] = AgentAudio(__UpperCAmelCase) self.assertTrue(torch.allclose(__UpperCAmelCase , agent_type.to_raw() , atol=1e-4)) self.assertEqual(agent_type.to_string() , __UpperCAmelCase) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[str]): a : Dict = torch.randint(0 , 256 , (64, 64, 3)) a : str = AgentImage(__UpperCAmelCase) a : str = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__UpperCAmelCase , agent_type._tensor , atol=1e-4)) self.assertIsInstance(agent_type.to_raw() , Image.Image) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__UpperCAmelCase)) def __snake_case ( self : int): a : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" a : int = Image.open(__UpperCAmelCase) a : Any = AgentImage(__UpperCAmelCase) self.assertTrue(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__UpperCAmelCase)) def __snake_case ( self : str): a : int = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" a : str = Image.open(__UpperCAmelCase) a : Dict = AgentImage(__UpperCAmelCase) self.assertFalse(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__UpperCAmelCase)) class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): a : Dict = "Hey!" a : Tuple = AgentText(__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , agent_type.to_string()) self.assertEqual(__UpperCAmelCase , agent_type.to_raw()) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
40
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Depth-first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS post-order on `graph`, then DFS on its reverse."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
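A quick check on the first test graph (a sketch; component ordering depends on traversal order, so compare sorted):

# 0 -> 2 -> 1 -> 0 forms a cycle; 3 and 4 are their own components.
components = strongly_connected_components(test_graph_1)
assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]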
82
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
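Outside the unittest harness, the two-stage flow the slow test exercises boils down to chaining the prior and the img2img pipeline. A minimal sketch, assuming a CUDA device and the same public checkpoints used above:

import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

# the prior turns text into image embeddings; the img2img pipeline consumes them
prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
image = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,  # low strength keeps most of the source image
).images[0]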
371
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation for the minimum vertex cover problem.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works as a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
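As a quick sanity check beyond the module's own demo (the star graph here is an illustrative input, not part of the original file), the hub vertex covers every edge in one pick, so the greedy choice happens to be optimal, though in general the max-degree heuristic only guarantees a logarithmic-factor approximation:

# star graph: vertex 0 touches every edge, so greedy selects it and stops
star = {0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}
print(greedy_min_vertex_cover(star))  # {0}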
4
0
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
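A minimal usage sketch (the environment-variable names are illustrative, not fixed by this module):

import os

os.environ["MY_APP_DEBUG"] = "1"
os.environ["MY_APP_WORKERS"] = "8"

debug = parse_flag_from_env("MY_APP_DEBUG")                         # True
workers = get_int_from_env(["MY_APP_WORKERS", "NPROC"], default=4)  # 8
mode = parse_choice_from_env("MY_APP_MODE")                         # "no" (the default)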
97
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> & new answer tokens as per new context."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # shift the answer span left for every html token dropped before it
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
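The windowing arithmetic in get_strided_contexts_and_ans is easiest to see with small numbers; a toy illustration (the values are made up for readability, not taken from the script):

# each window re-prepends the question ids, and the start index advances by
# max_length - doc_stride, so consecutive windows overlap
max_length, doc_stride, q_len, n_tokens = 16, 8, 4, 40
for i in range(q_len, n_tokens, max_length - doc_stride):
    end_index = i + max_length - q_len
    print(f"window = question[0:{q_len}] + tokens[{i}:{end_index}]")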
129
0
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = HfArgumentParser(A_ ) _lowerCamelCase : str = parser.parse_args_into_dataclasses()[0] _lowerCamelCase : int = TensorFlowBenchmark(args=A_ ) try: _lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] except ValueError as e: _lowerCamelCase : Tuple = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' _lowerCamelCase : int = ''' '''.join(str(A_ ).split(''' ''' )[:-1] ) _lowerCamelCase : Dict = '''''' _lowerCamelCase : str = eval(str(A_ ).split(''' ''' )[-1] ) _lowerCamelCase : Dict = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(A_ ) if len(A_ ) > 0: _lowerCamelCase : str = full_error_msg + begin_error_msg + str(A_ ) raise ValueError(A_ ) benchmark.run() if __name__ == "__main__": main()
175
"""simple docstring""" import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever lowerCAmelCase__ = logging.getLogger(__name__) class __snake_case ( _lowercase): def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=None ): """simple docstring""" super().__init__( __lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , ) _lowerCamelCase : Dict = None def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int ): """simple docstring""" logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually _lowerCamelCase : List[str] = self._infer_socket_ifname() # avoid clash with the NCCL port _lowerCamelCase : Dict = str(distributed_port + 1 ) _lowerCamelCase : str = dist.new_group(ranks=__lowerCAmelCase , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple=torch.floataa ): """simple docstring""" _lowerCamelCase : Optional[Any] = torch.empty(__lowerCAmelCase , dtype=__lowerCAmelCase ) dist.scatter(__lowerCAmelCase , src=0 , scatter_list=__lowerCAmelCase , group=self.process_group ) return target_tensor def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[str] = psutil.net_if_addrs() # a hacky way to deal with varying network interface names _lowerCamelCase : str = next((addr for addr in addrs if addr.startswith('''e''' )) , __lowerCAmelCase ) return ifname def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int ): """simple docstring""" if not dist.is_initialized(): _lowerCamelCase , _lowerCamelCase : Any = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase ) # distributed training _lowerCamelCase : Dict = dist.get_world_size(group=self.process_group ) # gather logic _lowerCamelCase : str = None if self._is_main(): _lowerCamelCase : List[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCAmelCase )] dist.gather(torch.tensor(__lowerCAmelCase ) , dst=0 , gather_list=__lowerCAmelCase , group=self.process_group ) # scatter logic _lowerCamelCase : int = question_hidden_states.shape[0] _lowerCamelCase : str = [] _lowerCamelCase : Optional[int] = [] if self._is_main(): assert len(__lowerCAmelCase ) == world_size _lowerCamelCase , _lowerCamelCase : Tuple = self._main_retrieve(torch.cat(__lowerCAmelCase ).numpy() , __lowerCAmelCase ) _lowerCamelCase , 
_lowerCamelCase : List[str] = torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase ) _lowerCamelCase : List[Any] = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = self._scattered(__lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) _lowerCamelCase : str = self._scattered(__lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCAmelCase )
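A rough sketch of the intended call pattern across workers (the construction arguments and port are stand-ins; the real wiring lives in the RAG fine-tuning scripts):

# every rank builds the retriever; init_retrieval loads the index on rank 0 only
retriever = RagPyTorchDistributedRetriever(
    config, question_encoder_tokenizer, generator_tokenizer, index=index
)
retriever.init_retrieval(distributed_port=12345)  # port + 1 is reserved for the gloo group

# retrieve() gathers queries to rank 0, searches the index there,
# and scatters each rank's documents back to it
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)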
175
1