Dataset schema:

  column                   type     value range
  -----------------------  -------  --------------------
  code                     string   lengths 87 – 55.2k
  code_codestyle           int64    0 – 349
  style_context            string   lengths 135 – 49.1k
  style_context_codestyle  int64    0 – 349
  label                    int64    0 – 1
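Each row pairs a `code` sample with a `style_context` sample, tags each with a numeric code-style id in 0–349, and carries a binary `label`. A minimal sketch of loading and inspecting such a dataset with the `datasets` library; the repository id `user/code-style-pairs` is a placeholder assumption, since this dump does not record the dataset's actual name:

# Inspection sketch; the repo id below is a placeholder assumption,
# not the dataset's real name.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical id
print(ds.features)  # code (string), code_codestyle (int64), style_context, ...

row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["label"])

The rows below appear in schema order: code, code_codestyle, style_context, style_context_codestyle, label.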
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = '▁' UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'} } UpperCAmelCase_ = { 'google/pegasus-xsum': 5_1_2, } UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCAmelCase_ : Tuple = VOCAB_FILES_NAMES lowerCAmelCase_ : Dict = VOCAB_FILES_NAMES lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : Optional[int]="</s>" , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : Dict="<mask_2>" , _UpperCAmelCase : Dict="<mask_1>" , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[Any]=1_03 , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Optional[Any] , ): """simple docstring""" UpperCAmelCase__ = offset if additional_special_tokens is not None: if not isinstance(lowercase_ , lowercase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(lowercase_ )}, but is''' f''' {type(lowercase_ )}''' ) UpperCAmelCase__ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowercase_ ) , self.offset - 1 ) ] if len(set(lowercase_ ) ) != len(lowercase_ ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) UpperCAmelCase__ = additional_special_tokens_extended else: UpperCAmelCase__ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) UpperCAmelCase__ = mask_token_sent UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) # add special tokens to encoder dict UpperCAmelCase__ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) UpperCAmelCase__ = {v: k for k, v in self.encoder.items()} @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return len(self.sp_model ) + self.offset def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Any , _UpperCAmelCase : List[Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str ): """simple docstring""" return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str ): """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] UpperCAmelCase__ = self.sp_model.piece_to_id(lowercase_ ) return sp_id + self.offset def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: UpperCAmelCase__ = self.sp_model.IdToPiece(index - self.offset ) return token def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_ ) + token UpperCAmelCase__ = [] else: current_sub_tokens.append(lowercase_ ) out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" return 1 def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[str] ): 
"""simple docstring""" UpperCAmelCase__ = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowercase_ ) elif token_ids_a is None: return self._special_token_mask(lowercase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str=None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
346
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING snake_case_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Any , *lowercase_ :str , **lowercase_ :List[Any] ) -> Union[str, Any]: super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any=None , lowercase_ :Optional[int]=None , lowercase_ :Tuple=None , **lowercase_ :Tuple ) -> Dict: UpperCAmelCase , UpperCAmelCase = {}, {} if padding is not None: UpperCAmelCase = padding if truncation is not None: UpperCAmelCase = truncation if top_k is not None: UpperCAmelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self :List[Any] , lowercase_ :Union["Image.Image", str] , lowercase_ :str = None , **lowercase_ :Union[str, Any] ) -> Union[str, Any]: if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = {'image': image, 'question': question} else: UpperCAmelCase = image UpperCAmelCase = super().__call__(lowercase_ , **lowercase_ ) return results def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :int=False , lowercase_ :Optional[int]=False ) -> Union[str, Any]: UpperCAmelCase = load_image(inputs['image'] ) UpperCAmelCase = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> Any: UpperCAmelCase = self.model(**lowercase_ ) return model_outputs def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple , lowercase_ :List[Any]=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: UpperCAmelCase = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase = model_outputs.logits.sigmoid()[0] UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase_ ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase = scores.tolist() UpperCAmelCase = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
78
0
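The `code` and `style_context` cells are stored as single flattened strings, which makes long samples like the Pegasus tokenizer above hard to read. A small display helper, assuming `black` is installed; cells whose obfuscated names no longer parse are returned unchanged:

# Re-indent a flattened code cell for display. Assumption: `black` is
# available in the environment; this is a viewing aid, not part of the data.
import black

def pretty(cell: str) -> str:
    try:
        return black.format_str(cell, mode=black.Mode())
    except black.InvalidInput:
        return cell  # the cell does not parse as Python; show it raw

print(pretty("def f(x): return 1 if x in (0, 1) else x * f(x - 1)"))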
'''simple docstring'''

from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! recursively, memoised with lru_cache.

    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """transfo-xl""" __UpperCamelCase = ["""mems"""] __UpperCamelCase = { """n_token""": """vocab_size""", """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self :List[Any] , lowercase_ :Optional[int]=26_77_35 , lowercase_ :Union[str, Any]=[2_00_00, 4_00_00, 20_00_00] , lowercase_ :List[Any]=10_24 , lowercase_ :Optional[Any]=10_24 , lowercase_ :Tuple=16 , lowercase_ :Tuple=64 , lowercase_ :Any=40_96 , lowercase_ :int=4 , lowercase_ :List[str]=False , lowercase_ :Union[str, Any]=18 , lowercase_ :Optional[Any]=16_00 , lowercase_ :Dict=10_00 , lowercase_ :Optional[int]=True , lowercase_ :Tuple=True , lowercase_ :Dict=0 , lowercase_ :Tuple=-1 , lowercase_ :Optional[int]=True , lowercase_ :Optional[int]=0.1 , lowercase_ :str=0.0 , lowercase_ :List[str]=True , lowercase_ :int="normal" , lowercase_ :Dict=0.01 , lowercase_ :Optional[Any]=0.01 , lowercase_ :Dict=0.02 , lowercase_ :Tuple=1E-5 , lowercase_ :str=0 , **lowercase_ :Tuple , ) -> List[str]: UpperCAmelCase = vocab_size UpperCAmelCase = [] self.cutoffs.extend(lowercase_ ) if proj_share_all_but_first: UpperCAmelCase = [False] + [True] * len(self.cutoffs ) else: UpperCAmelCase = [False] + [False] * len(self.cutoffs ) UpperCAmelCase = d_model UpperCAmelCase = d_embed UpperCAmelCase = d_head UpperCAmelCase = d_inner UpperCAmelCase = div_val UpperCAmelCase = pre_lnorm UpperCAmelCase = n_layer UpperCAmelCase = n_head UpperCAmelCase = mem_len UpperCAmelCase = same_length UpperCAmelCase = attn_type UpperCAmelCase = clamp_len UpperCAmelCase = sample_softmax UpperCAmelCase = adaptive UpperCAmelCase = dropout UpperCAmelCase = dropatt UpperCAmelCase = untie_r UpperCAmelCase = init UpperCAmelCase = init_range UpperCAmelCase = proj_init_std UpperCAmelCase = init_std UpperCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowercase_ , **lowercase_ ) @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any: # Message copied from Transformer-XL documentation logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any ) -> Tuple: # Message copied from Transformer-XL documentation raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
78
0
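The factorial cell in the row above is one of the few samples short enough to execute directly; a quick check of the memoisation, using the cleaned-up function:

from functools import lru_cache

@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)

print(factorial(10))           # 3628800
print(factorial.cache_info())  # hits/misses recorded by lru_cache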
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> int: assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE ( ) -> Any: assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: _lowerCAmelCase : Tuple = """mock-s3-bucket""" _lowerCAmelCase : str = f"s3://{mock_bucket}" _lowerCAmelCase : Dict = extract_path_from_uri(lowercase_ ) assert dataset_path.startswith("""s3://""" ) is False _lowerCAmelCase : Dict = """./local/path""" _lowerCAmelCase : str = extract_path_from_uri(lowercase_ ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[Any]: _lowerCAmelCase : Optional[int] = is_remote_filesystem(lowercase_ ) assert is_remote is True _lowerCAmelCase : List[Any] = fsspec.filesystem("""file""" ) _lowerCAmelCase : Any = is_remote_filesystem(lowercase_ ) assert is_remote is False @pytest.mark.parametrize("""compression_fs_class""" ,lowercase_ ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ,_lowerCamelCase : str ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> List[str]: _lowerCAmelCase : Optional[Any] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file} _lowerCAmelCase : Any = input_paths[compression_fs_class.protocol] if input_path is None: _lowerCAmelCase : int = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase_ ) _lowerCAmelCase : int = fsspec.filesystem(compression_fs_class.protocol ,fo=lowercase_ ) assert isinstance(lowercase_ ,lowercase_ ) _lowerCAmelCase : Dict = os.path.basename(lowercase_ ) _lowerCAmelCase : Tuple = expected_filename[: expected_filename.rindex(""".""" )] assert fs.glob("""*""" ) == [expected_filename] with fs.open(lowercase_ ,"""r""" ,encoding="""utf-8""" ) as f, open(lowercase_ ,encoding="""utf-8""" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("""protocol""" ,["""zip""", """gzip"""] ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[Any] ) -> Union[str, Any]: _lowerCAmelCase : List[str] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path} _lowerCAmelCase : List[Any] = compressed_file_paths[protocol] _lowerCAmelCase : Any = """dataset.jsonl""" _lowerCAmelCase : Optional[int] = f"{protocol}://{member_file_path}::{compressed_file_path}" _lowerCAmelCase , *_lowerCAmelCase : str = fsspec.get_fs_token_paths(lowercase_ ) assert fs.isfile(lowercase_ ) assert not fs.isfile("""non_existing_""" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Any ,_lowerCamelCase : int ) -> Tuple: _lowerCAmelCase : Optional[int] = hf_api.dataset_info(lowercase_ ,token=lowercase_ ) _lowerCAmelCase : int = 
HfFileSystem(repo_info=lowercase_ ,token=lowercase_ ) assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"] assert hffs.isdir("""data""" ) assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" ) with open(lowercase_ ) as f: assert hffs.open("""data/text_data.txt""" ,"""r""" ).read() == f.read() def SCREAMING_SNAKE_CASE ( ) -> str: _lowerCAmelCase : Optional[int] = """bz2""" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(lowercase_ ,lowercase_ ,clobber=lowercase_ ) with pytest.warns(lowercase_ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(lowercase_ ) == 1 assert ( str(warning_info[0].message ) == f"A filesystem protocol was already set for {protocol} and will be overwritten." )
44
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ): UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: UpperCAmelCase = F"""{olid} is not a valid Open Library olid""" raise ValueError(lowercase_ ) return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json() def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} UpperCAmelCase = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] UpperCAmelCase = data['First sentence']['value'] for key, value in data.items(): if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = ', '.join(lowercase_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
78
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[str] , lowercase_ :int , lowercase_ :Optional[int]=None , lowercase_ :List[str]=None ) -> str: UpperCAmelCase = data UpperCAmelCase = previous UpperCAmelCase = next_node def __str__( self :Optional[Any] ) -> str: return f"""{self.data}""" def UpperCAmelCase__ ( self :int ) -> int: return self.data def UpperCAmelCase__ ( self :List[str] ) -> Any: return self.next def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]: return self.previous class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Optional[Any] ) -> str: UpperCAmelCase = head def __iter__( self :List[str] ) -> List[str]: return self def UpperCAmelCase__ ( self :int ) -> Any: if not self.current: raise StopIteration else: UpperCAmelCase = self.current.get_data() UpperCAmelCase = self.current.get_next() return value class A_ : """simple docstring""" def __init__( self :Union[str, Any] ) -> List[Any]: UpperCAmelCase = None # First node in list UpperCAmelCase = None # Last node in list def __str__( self :List[Any] ) -> Optional[Any]: UpperCAmelCase = self.head UpperCAmelCase = [] while current is not None: nodes.append(current.get_data() ) UpperCAmelCase = current.get_next() return " ".join(str(lowercase_ ) for node in nodes ) def __contains__( self :str , lowercase_ :int ) -> str: UpperCAmelCase = self.head while current: if current.get_data() == value: return True UpperCAmelCase = current.get_next() return False def __iter__( self :Tuple ) -> Dict: return LinkedListIterator(self.head ) def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]: if self.head: return self.head.get_data() return None def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: if self.tail: return self.tail.get_data() return None def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node ) -> None: if self.head is None: UpperCAmelCase = node UpperCAmelCase = node else: self.insert_before_node(self.head , lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :Node ) -> None: if self.head is None: self.set_head(lowercase_ ) else: self.insert_after_node(self.tail , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int ) -> None: UpperCAmelCase = Node(lowercase_ ) if self.head is None: self.set_head(lowercase_ ) else: self.set_tail(lowercase_ ) def UpperCAmelCase__ ( self :int , lowercase_ :Node , lowercase_ :Node ) -> None: UpperCAmelCase = node UpperCAmelCase = node.previous if node.get_previous() is None: UpperCAmelCase = node_to_insert else: UpperCAmelCase = node_to_insert UpperCAmelCase = node_to_insert def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node , lowercase_ :Node ) -> None: UpperCAmelCase = node UpperCAmelCase = node.next if node.get_next() is None: UpperCAmelCase = node_to_insert else: UpperCAmelCase = node_to_insert UpperCAmelCase = node_to_insert def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = 1 UpperCAmelCase = Node(lowercase_ ) UpperCAmelCase = self.head while node: if current_position == position: self.insert_before_node(lowercase_ , lowercase_ ) return current_position += 1 UpperCAmelCase = node.next self.insert_after_node(self.tail , lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int ) -> Node: UpperCAmelCase = self.head while node: if node.get_data() == item: return node UpperCAmelCase = node.get_next() raise Exception('Node not found' ) def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] ) -> Dict: if 
(node := self.get_node(lowercase_ )) is not None: if node == self.head: UpperCAmelCase = self.head.get_next() if node == self.tail: UpperCAmelCase = self.tail.get_previous() self.remove_node_pointers(lowercase_ ) @staticmethod def UpperCAmelCase__ ( lowercase_ :Node ) -> None: if node.get_next(): UpperCAmelCase = node.previous if node.get_previous(): UpperCAmelCase = node.next UpperCAmelCase = None UpperCAmelCase = None def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: return self.head is None def _lowerCAmelCase ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
78
0
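The Swinv2 `__init__` cell in the row above uses the lazy-import pattern: submodules are imported only when one of their symbols is first accessed, and the package's entry in `sys.modules` is replaced with a proxy module. A stripped-down sketch of the same idea; the names `my_pkg`, `heavy_module`, and `HeavyClass` are illustrative, not from the dataset:

# Minimal lazy-import sketch (illustrative names, not the real _LazyModule).
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their symbols is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        mod_name = self._symbol_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{mod_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


# Overwriting the sys.modules entry is what makes `import my_pkg` hand back
# the lazy proxy; the cell above does the same via sys.modules[__name__].
# sys.modules["my_pkg"] = LazyModule("my_pkg", {"heavy_module": ["HeavyClass"]})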
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter UpperCAmelCase_ = True except ImportError: UpperCAmelCase_ = False UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] ) -> Optional[Any]: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowercase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' @staticmethod def UpperCamelCase__ ( __magic_name__ ) -> List[str]: """simple docstring""" UpperCamelCase__ : Optional[Any] = parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''', action='''store_true''', help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''', type=lowercase_, help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''', type=lowercase_, help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=lowercase_ ) def __init__( self, __magic_name__, __magic_name__, __magic_name__=None, *__magic_name__ ) -> str: """simple docstring""" UpperCamelCase__ : List[Any] = testing UpperCamelCase__ : List[str] = testing_file UpperCamelCase__ : int = path def UpperCamelCase__ ( self ) -> int: """simple docstring""" warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory UpperCamelCase__ : Optional[Any] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(lowercase_ ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. 
''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) UpperCamelCase__ : str = ( Path(lowercase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) UpperCamelCase__ : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(lowercase_ ) ) else: with open(self._testing_file, '''r''' ) as configuration_file: UpperCamelCase__ : int = json.load(lowercase_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ), no_input=lowercase_, extra_context=lowercase_, ) UpperCamelCase__ : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''', '''r''' ) as configuration_file: UpperCamelCase__ : Any = json.load(lowercase_ ) UpperCamelCase__ : Optional[int] = configuration['''lowercase_modelname'''] UpperCamelCase__ : Union[str, Any] = configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f"{directory}/configuration.json" ) UpperCamelCase__ : Any = '''PyTorch''' in generate_tensorflow_pytorch_and_flax UpperCamelCase__ : Dict = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax UpperCamelCase__ : Optional[int] = '''Flax''' in generate_tensorflow_pytorch_and_flax UpperCamelCase__ : Tuple = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}" os.makedirs(lowercase_, exist_ok=lowercase_ ) os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=lowercase_ ) # Tests require submodules as they have parent imports with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", '''w''' ): pass shutil.move( f"{directory}/__init__.py", f"{model_dir}/__init__.py", ) shutil.move( f"{directory}/configuration_{lowercase_model_name}.py", f"{model_dir}/configuration_{lowercase_model_name}.py", ) def remove_copy_lines(__magic_name__ ): with open(lowercase_, '''r''' ) as f: UpperCamelCase__ : List[Any] = f.readlines() with open(lowercase_, '''w''' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(lowercase_ ) if output_pytorch: if not self._testing: remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_{lowercase_model_name}.py", f"{model_dir}/modeling_{lowercase_model_name}.py", ) shutil.move( f"{directory}/test_modeling_{lowercase_model_name}.py", f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py", ) else: os.remove(f"{directory}/modeling_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_tf_{lowercase_model_name}.py", f"{model_dir}/modeling_tf_{lowercase_model_name}.py", ) shutil.move( f"{directory}/test_modeling_tf_{lowercase_model_name}.py", f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py", ) else: os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" ) if output_flax: if not self._testing: remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_flax_{lowercase_model_name}.py", f"{model_dir}/modeling_flax_{lowercase_model_name}.py", ) shutil.move( f"{directory}/test_modeling_flax_{lowercase_model_name}.py", f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py", ) else: os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/{lowercase_model_name}.md", f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md", ) shutil.move( f"{directory}/tokenization_{lowercase_model_name}.py", f"{model_dir}/tokenization_{lowercase_model_name}.py", ) shutil.move( f"{directory}/tokenization_fast_{lowercase_model_name}.py", f"{model_dir}/tokenization_{lowercase_model_name}_fast.py", ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(__magic_name__, __magic_name__, __magic_name__ ): # Create temp file UpperCamelCase__ ,UpperCamelCase__ : Tuple = mkstemp() UpperCamelCase__ : int = False with fdopen(lowercase_, '''w''' ) as new_file: with open(lowercase_ ) as old_file: for line in old_file: new_file.write(lowercase_ ) if line_to_copy_below in line: UpperCamelCase__ : Optional[int] = True for line_to_copy in lines_to_copy: new_file.write(lowercase_ ) if not line_found: raise ValueError(f"Line {line_to_copy_below} was not found in file." 
) # Copy the file permissions from the old file to the new file copymode(lowercase_, lowercase_ ) # Remove original file remove(lowercase_ ) # Move new file move(lowercase_, lowercase_ ) def skip_units(__magic_name__ ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(__magic_name__ ): with open(lowercase_ ) as datafile: UpperCamelCase__ : Union[str, Any] = [] UpperCamelCase__ : int = False UpperCamelCase__ : Any = False for line in datafile: if "# To replace in: " in line and "##" not in line: UpperCamelCase__ : Any = line.split('''"''' )[1] UpperCamelCase__ : Dict = skip_units(lowercase_ ) elif "# Below: " in line and "##" not in line: UpperCamelCase__ : List[str] = line.split('''"''' )[1] UpperCamelCase__ : int = skip_units(lowercase_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(lowercase_, lowercase_, lowercase_ ) UpperCamelCase__ : Tuple = [] elif "# Replace with" in line and "##" not in line: UpperCamelCase__ : Optional[Any] = [] elif "##" not in line: lines_to_copy.append(lowercase_ ) remove(lowercase_ ) replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" ) os.rmdir(lowercase_ )
201
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[Any] , lowercase_ :int ) -> None: UpperCAmelCase = size UpperCAmelCase = [0] * size UpperCAmelCase = [0] * size @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return index | (index + 1) @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = value while index < self.size: UpperCAmelCase = self.get_prev(lowercase_ ) + 1 if current_left_border == index: UpperCAmelCase = value else: UpperCAmelCase = max(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = self.get_next(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int ) -> int: right -= 1 # Because of right is exclusive UpperCAmelCase = 0 while left <= right: UpperCAmelCase = self.get_prev(lowercase_ ) if left <= current_left: UpperCAmelCase = max(lowercase_ , self.tree[right] ) UpperCAmelCase = current_left else: UpperCAmelCase = max(lowercase_ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str: UpperCAmelCase = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } UpperCAmelCase = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): UpperCAmelCase = token_dict['token'] UpperCAmelCase = Tokenizer(Unigram() ) UpperCAmelCase = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) , ' ' ), normalizers.Lowercase(), ] ) UpperCAmelCase = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ), pre_tokenizers.Digits(individual_digits=lowercase_ ), pre_tokenizers.Punctuation(), ] ) UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ) UpperCAmelCase = TemplateProcessing( single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , ) UpperCAmelCase = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [files] self._tokenizer.train(lowercase_ , trainer=lowercase_ ) self.add_unk_id() def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ ) self.add_unk_id() def UpperCAmelCase__ ( self :Union[str, Any] ) -> int: UpperCAmelCase = json.loads(self._tokenizer.to_str() ) UpperCAmelCase = self.special_tokens['unk']['id'] UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
78
0
"""simple docstring""" import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_=None ): '''simple docstring''' super().__init__( lowercase_ , question_encoder_tokenizer=lowercase_ , generator_tokenizer=lowercase_ , index=lowercase_ , init_retrieval=lowercase_ , ) __snake_case : Any = None def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually __snake_case : Union[str, Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : Optional[Any] = str(distributed_port + 1 ) __snake_case : str = dist.new_group(ranks=lowercase_ , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return dist.get_rank(group=self.process_group ) == 0 def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=torch.floataa ): '''simple docstring''' __snake_case : Any = torch.empty(lowercase_ , dtype=lowercase_ ) dist.scatter(lowercase_ , src=0 , scatter_list=lowercase_ , group=self.process_group ) return target_tensor def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Any = next((addr for addr in addrs if addr.startswith('''e''' )) , lowercase_ ) return ifname def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if not dist.is_initialized(): __snake_case , __snake_case : Dict = self._main_retrieve(lowercase_ , lowercase_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase_ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Dict = None if self._is_main(): __snake_case : int = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowercase_ )] dist.gather(torch.tensor(lowercase_ ) , dst=0 , gather_list=lowercase_ , group=self.process_group ) # scatter logic __snake_case : List[str] = question_hidden_states.shape[0] __snake_case : int = [] __snake_case : List[Any] = [] if self._is_main(): assert len(lowercase_ ) == world_size __snake_case , __snake_case : List[str] = self._main_retrieve(torch.cat(lowercase_ ).numpy() , lowercase_ ) __snake_case , __snake_case : Optional[int] = torch.tensor(lowercase_ ), torch.tensor(lowercase_ ) __snake_case : Union[str, Any] = self._chunk_tensor(lowercase_ , lowercase_ ) __snake_case : Tuple = self._chunk_tensor(lowercase_ , lowercase_ ) __snake_case : Any = self._scattered(lowercase_ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : str = self._scattered(lowercase_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return 
retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowercase_ )
102
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _lowerCAmelCase ( lowercase_ = 8 ): UpperCAmelCase = ascii_letters + digits + punctuation return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(lowercase_ ) UpperCAmelCase = i // 3 UpperCAmelCase = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) UpperCAmelCase = ( chars_incl + random(lowercase_ , quotient + remainder ) + random(lowercase_ , lowercase_ ) + random(lowercase_ , lowercase_ ) ) UpperCAmelCase = list(lowercase_ ) shuffle(lowercase_ ) return "".join(lowercase_ ) # random is a generalised function for letters, characters and numbers def _lowerCAmelCase ( lowercase_ , lowercase_ ): return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ = 8 ): if len(lowercase_ ) < min_length: # Your Password must be at least 8 characters long return False UpperCAmelCase = any(char in ascii_uppercase for char in password ) UpperCAmelCase = any(char in ascii_lowercase for char in password ) UpperCAmelCase = any(char in digits for char in password ) UpperCAmelCase = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def _lowerCAmelCase ( ): UpperCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() ) UpperCAmelCase = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(lowercase_ ) ) print( 'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
78
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 A__ : Optional[int] = data_utils.TransfoXLTokenizer A__ : Dict = data_utils.TransfoXLCorpus A__ : str = data_utils A__ : Union[str, Any] = data_utils def UpperCamelCase( __UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Dict ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase_ ,'''rb''' ) as fp: lowerCAmelCase_ : Optional[int] = pickle.load(lowercase_ ,encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCAmelCase_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCAmelCase_ : Any = corpus.vocab.__dict__ torch.save(lowercase_ ,lowercase_ ) lowerCAmelCase_ : int = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' ,lowercase_ ) lowerCAmelCase_ : List[Any] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(f"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(lowercase_ ,lowercase_ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCAmelCase_ : int = os.path.abspath(lowercase_ ) lowerCAmelCase_ : List[Any] = os.path.abspath(lowercase_ ) print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCAmelCase_ : str = TransfoXLConfig() else: lowerCAmelCase_ : Optional[Any] = TransfoXLConfig.from_json_file(lowercase_ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase_ : int = TransfoXLLMHeadModel(lowercase_ ) lowerCAmelCase_ : Dict = load_tf_weights_in_transfo_xl(lowercase_ ,lowercase_ ,lowercase_ ) # Save pytorch-model lowerCAmelCase_ : str = os.path.join(lowercase_ ,lowercase_ ) lowerCAmelCase_ : Union[str, Any] = os.path.join(lowercase_ ,lowercase_ ) print(f"""Save PyTorch model to {os.path.abspath(lowercase_ )}""" ) torch.save(model.state_dict() ,lowercase_ ) print(f"""Save configuration file to {os.path.abspath(lowercase_ )}""" ) with open(lowercase_ ,'''w''' ,encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": A__ : Any = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) A__ : Optional[Any] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
103
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : """simple docstring""" def UpperCAmelCase__ ( self :Any ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self :List[Any] ) -> Any: torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, 
"scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self :List[str] ) -> str: UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = inputs['prompt'] UpperCAmelCase = inputs['generator'] UpperCAmelCase = inputs['num_inference_steps'] UpperCAmelCase = inputs['output_type'] if "image" in inputs: UpperCAmelCase = inputs['image'] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['mask_image'] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['original_image'] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ ) # inputs with prompt converted to embeddings UpperCAmelCase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = inputs['generator'] UpperCAmelCase = inputs['num_inference_steps'] UpperCAmelCase = inputs['output_type'] # inputs with prompt converted to embeddings UpperCAmelCase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**lowercase_ )[0] UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max() self.assertLess(lowercase_ , 1E-4 ) def UpperCAmelCase__ ( self :List[Any] ) -> str: UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = pipe_loaded(**lowercase_ )[0] UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) 
).max() self.assertLess(lowercase_ , 1E-4 )
78
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[str]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) 
-> Optional[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(lowercase_ , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[str]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: 
requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[str]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> str: 
requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> int: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> str: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) 
@classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[str]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[str]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Tuple: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Tuple: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): 
_SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Dict: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[str]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Any: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Tuple: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Dict: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Optional[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Any: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> Optional[int]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , 
**lowercase ) -> List[Any]: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> int: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> str: requires_backends(cls , ["""torch"""] ) class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = ['torch'] def __init__( self , *lowercase , **lowercase ) -> List[Any]: requires_backends(self , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple: requires_backends(cls , ["""torch"""] ) @classmethod def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]: requires_backends(cls , ["""torch"""] )
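These placeholders keep `import` statements working when PyTorch is absent; the failure is deferred to first use. A minimal sketch of the behavior, using the reconstructed `TorchBackedDummy` name from above (not an original identifier):

```python
# In an environment without torch, instantiation raises an informative error
# rather than breaking the package import itself.
try:
    TorchBackedDummy()
except ImportError as err:  # requires_backends raises ImportError
    print(err)
```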
46
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case_ = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ): UpperCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _lowerCAmelCase ( lowercase_ , lowercase_=512 , lowercase_=512 ): UpperCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1 UpperCAmelCase = np.transpose(lowercase_ , [2, 0, 1] ) UpperCAmelCase = torch.from_numpy(lowercase_ ).unsqueeze(0 ) return image class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]: super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]: # get the original timestep using init_timestep UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ ) UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any: if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" ) UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ ) UpperCAmelCase = batch_size * num_images_per_prompt if image.shape[1] 
== 4: UpperCAmelCase = image else: if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ ) ] UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) else: UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ ) UpperCAmelCase = self.movq.config.scaling_factor * init_latents UpperCAmelCase = torch.cat([init_latents] , dim=0 ) UpperCAmelCase = init_latents.shape UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = init_latents return latents def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) UpperCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. 
UpperCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase__ ( self :List[Any] ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]: UpperCAmelCase = self._execution_device UpperCAmelCase = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase = image_embeds.shape[0] if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) if not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [image] if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 ) UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ ) UpperCAmelCase = self.movq.encode(lowercase_ )['latents'] UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) UpperCAmelCase = self.prepare_latents( lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase = {'image_embeds': image_embeds} UpperCAmelCase = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 ) UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 ) UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase = image * 0.5 + 0.5 UpperCAmelCase = image.clamp(0 , 1 ) UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
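The `downscale_height_and_width` helper rounds the requested resolution to a size the VQ latent grid can represent: it divides by `scale_factor**2`, rounds up, then multiplies back by `scale_factor`. A quick sanity check of that arithmetic, assuming the function as defined above:

```python
# 768 // 8**2 = 12 with no remainder, so the returned size is 12 * 8 = 96 per side.
assert downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)

# 500 // 64 = 7 remainder 52, so the height is rounded up to 8 * 8 = 64.
assert downscale_height_and_width(500, 768, scale_factor=8) == (64, 96)
```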
78
0
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def UpperCAmelCase__ ( _UpperCAmelCase = 8 ): """simple docstring""" A_ : Any = ascii_letters + digits + punctuation return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" i -= len(lowercase_ ) A_ : Any = i // 3 A_ : Tuple = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) A_ : Tuple = ( chars_incl + random(lowercase_ , quotient + remainder ) + random(lowercase_ , lowercase_ ) + random(lowercase_ , lowercase_ ) ) A_ : Union[str, Any] = list(lowercase_ ) shuffle(lowercase_ ) return "".join(lowercase_ ) # random is a generalised function for letters, characters and numbers def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" pass # Put your code here... def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" pass # Put your code here... def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" pass # Put your code here... def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase = 8 ): """simple docstring""" if len(lowercase_ ) < min_length: # Your Password must be at least 8 characters long return False A_ : Dict = any(char in ascii_uppercase for char in password ) A_ : Optional[int] = any(char in ascii_lowercase for char in password ) A_ : List[Any] = any(char in digits for char in password ) A_ : Optional[Any] = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def UpperCAmelCase__ ( ): """simple docstring""" A_ : Any = int(input('Please indicate the max length of your password: ' ).strip() ) A_ : List[Any] = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(lowercase_ ) ) print( 'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
286
"""simple docstring""" import colorsys from PIL import Image # type: ignore def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = x UpperCAmelCase = y for step in range(lowercase_ ): # noqa: B007 UpperCAmelCase = a * a - b * b + x UpperCAmelCase = 2 * a * b + y UpperCAmelCase = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowerCAmelCase ( lowercase_ ): if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def _lowerCAmelCase ( lowercase_ ): if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) ) def _lowerCAmelCase ( lowercase_ = 800 , lowercase_ = 600 , lowercase_ = -0.6 , lowercase_ = 0 , lowercase_ = 3.2 , lowercase_ = 50 , lowercase_ = True , ): UpperCAmelCase = Image.new('RGB' , (image_width, image_height) ) UpperCAmelCase = img.load() # loop through the image-coordinates for image_x in range(lowercase_ ): for image_y in range(lowercase_ ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase = figure_width / image_width * image_height UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase = get_distance(lowercase_ , lowercase_ , lowercase_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase = get_color_coded_rgb(lowercase_ ) else: UpperCAmelCase = get_black_and_white_rgb(lowercase_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure snake_case_ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
78
0
"""simple docstring""" def _snake_case ( UpperCamelCase : List[str] = 50000000 ): UpperCAmelCase : List[Any] = set() UpperCAmelCase : str = int((limit - 24) ** (1 / 2) ) UpperCAmelCase : int = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase_ ) ) ) for primea in primes: UpperCAmelCase : Tuple = primea * primea for primea in primes: UpperCAmelCase : int = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: UpperCAmelCase : int = primea * primea * primea * primea UpperCAmelCase : Union[str, Any] = square + cube + tetr if total >= limit: break ret.add(lowercase_ ) return len(lowercase_ ) if __name__ == "__main__": print(f"""{solution() = }""")
109
"""simple docstring""" import requests snake_case_ = """""" # <-- Put your OpenWeatherMap appid here! snake_case_ = """https://api.openweathermap.org/data/2.5/""" def _lowerCAmelCase ( lowercase_ = "Chicago" , lowercase_ = APPID ): return requests.get(URL_BASE + 'weather' , params=locals() ).json() def _lowerCAmelCase ( lowercase_ = "Kolkata, India" , lowercase_ = APPID ): return requests.get(URL_BASE + 'forecast' , params=locals() ).json() def _lowerCAmelCase ( lowercase_ = 5_5.6_8 , lowercase_ = 1_2.5_7 , lowercase_ = APPID ): return requests.get(URL_BASE + 'onecall' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: snake_case_ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
78
0
import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True  # attribute name reconstructed from the usual decoder test pattern
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
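The `create_and_check_decoder_model_past_large_inputs` test pins down the KV-cache invariant: running only the new tokens with `past_key_values` must reproduce the hidden states of a full forward pass over the concatenated sequence. The same check can be written generically for any Hugging Face causal LM with cache support; a sketch, where `model`, `input_ids`, and `next_tokens` are assumed to be given:

```python
import torch

def check_cache_consistency(model, input_ids, next_tokens, atol=1e-3):
    # Full pass over the concatenated sequence.
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    hidden_full = model(full_ids, output_hidden_states=True).hidden_states[-1]
    # Incremental pass: cache the prefix, then feed only the new tokens.
    cache = model(input_ids, use_cache=True).past_key_values
    hidden_inc = model(
        next_tokens, past_key_values=cache, output_hidden_states=True
    ).hidden_states[-1]
    # Compare last-layer states for the new positions only.
    return torch.allclose(hidden_full[:, -next_tokens.shape[1] :], hidden_inc, atol=atol)
```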
346
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = ["""image_processor""", """tokenizer"""] __UpperCamelCase = """LayoutLMv2ImageProcessor""" __UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowercase_ , ) UpperCAmelCase = kwargs.pop('feature_extractor' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowercase_ , lowercase_ ) def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCAmelCase = features['words'] UpperCAmelCase = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # add pixel values UpperCAmelCase = features.pop('pixel_values' ) if return_overflowing_tokens is True: UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] ) UpperCAmelCase = images return encoded_inputs def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image UpperCAmelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f""" {len(lowercase_ )} and {len(lowercase_ )}""" ) return images_with_overflow def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple: return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]: return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def UpperCAmelCase__ ( self :int ) -> Optional[int]: return ["input_ids", "bbox", "attention_mask", "image"] @property def UpperCAmelCase__ ( self :int ) -> Dict: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , ) return self.image_processor_class @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , ) return self.image_processor
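In the common OCR path the processor runs the image processor first, takes the recognized `words` and `boxes` from its output, and hands them to the tokenizer, so a caller only has to supply the image. A usage sketch; the checkpoint name is the canonical LayoutXLM one and the image path is a placeholder:

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image
```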
78
0
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])

        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\n\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\n\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\n\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\n {mlfq.calculate_sequence_of_finish_queue()}")
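With the sample workload above (P1..P4 arriving at 0 with bursts 53, 17, 68, 24 and time slices 17 and 25), the first round-robin pass gives every process up to 17 units, so P2 finishes exactly at t=34, and the FCFS tail later runs all remainders to completion. A tiny check of one round-robin cycle in isolation, a sketch using the classes above:

```python
P1, P2 = Process("P1", 0, 53), Process("P2", 0, 17)
mlfq = MLFQ(number_of_queues=2, time_slices=[17], queue=deque([P1, P2]), current_time=0)
finished, remaining = mlfq.round_robin(mlfq.ready_queue, 17)

# P1 used 17 of its 53 units and went back to the queue; P2 finished within its slice.
assert [p.process_name for p in finished] == ["P2"]
assert P1.burst_time == 53 - 17 and P2.burst_time == 0 and P2.stop_time == 34
```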
35
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class A_ : """simple docstring""" def __init__( self :Union[str, Any] ) -> str: UpperCAmelCase = {} def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict=1 ) -> List[Any]: if self.graph.get(lowercase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: UpperCAmelCase = [[w, v]] if not self.graph.get(lowercase_ ): UpperCAmelCase = [] def UpperCAmelCase__ ( self :Any ) -> Optional[int]: return list(self.graph ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[Any] ) -> Dict: if self.graph.get(lowercase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :Tuple=-2 , lowercase_ :List[Any]=-1 ) -> List[Any]: if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return visited def UpperCAmelCase__ ( self :List[str] , lowercase_ :int=-1 ) -> Tuple: if c == -1: UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(lowercase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase_ , lowercase_ , 1 ) def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any]=-2 ) -> Optional[int]: UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(lowercase_ ) visited.append(lowercase_ ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[int] ) -> List[Any]: UpperCAmelCase = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> List[str]: return len(self.graph[u] ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any=-2 ) -> int: UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return sorted_nodes def UpperCAmelCase__ ( self :str ) -> str: UpperCAmelCase = [] UpperCAmelCase = [] 
UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return list(lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return False def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int=-2 , lowercase_ :List[str]=-1 ) -> Any: UpperCAmelCase = time() self.dfs(lowercase_ , lowercase_ ) UpperCAmelCase = time() return end - begin def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[str]=-2 ) -> str: UpperCAmelCase = time() self.bfs(lowercase_ ) UpperCAmelCase = time() return end - begin class A_ : """simple docstring""" def __init__( self :List[str] ) -> Union[str, Any]: UpperCAmelCase = {} def UpperCAmelCase__ ( self :str , lowercase_ :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int]=1 ) -> Dict: # check if the u exists if self.graph.get(lowercase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist UpperCAmelCase = [[w, v]] # add the other way if self.graph.get(lowercase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist UpperCAmelCase = [[w, u]] def UpperCAmelCase__ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :Tuple ) -> Optional[Any]: if self.graph.get(lowercase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase_ ) # the other way round if self.graph.get(lowercase_ 
): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int]=-2 , lowercase_ :Optional[int]=-1 ) -> List[str]: if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return visited def UpperCAmelCase__ ( self :List[str] , lowercase_ :Optional[int]=-1 ) -> Any: if c == -1: UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(lowercase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase_ , lowercase_ , 1 ) def UpperCAmelCase__ ( self :Dict , lowercase_ :int=-2 ) -> int: UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(lowercase_ ) visited.append(lowercase_ ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[Any] ) -> str: return len(self.graph[u] ) def UpperCAmelCase__ ( self :Optional[Any] ) -> Any: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return list(lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> str: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = 
len(lowercase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return False def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]: return list(self.graph ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Union[str, Any]=-2 , lowercase_ :List[str]=-1 ) -> str: UpperCAmelCase = time() self.dfs(lowercase_ , lowercase_ ) UpperCAmelCase = time() return end - begin def UpperCAmelCase__ ( self :Any , lowercase_ :int=-2 ) -> str: UpperCAmelCase = time() self.bfs(lowercase_ ) UpperCAmelCase = time() return end - begin
78
0
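A minimal standalone sketch of the round-robin step from the row above (an illustrative addition, not part of the dataset row): it replays one pass with the demo's first time slice of 17 over bursts (53, 17, 68, 24), assuming all processes arrive at time 0.

from collections import deque

# one round-robin pass with time slice 17, mirroring the `cp.burst_time > time_slice` test above
bursts = deque([("P1", 53), ("P2", 17), ("P3", 68), ("P4", 24)])
time_slice, finished = 17, []
for _ in range(len(bursts)):
    name, burst = bursts.popleft()
    if burst > time_slice:
        bursts.append((name, burst - time_slice))  # preempted and requeued
    else:
        finished.append(name)  # completes within its slice
print(finished, list(bursts))  # ['P2'] [('P1', 36), ('P3', 51), ('P4', 7)]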
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : List[Any] ) -> Tuple: while a != 0: _lowerCAmelCase , _lowerCAmelCase : List[str] = b % a, a return b def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[int] ) -> List[Any]: if gcd(lowercase_ ,lowercase_ ) != 1: _lowerCAmelCase : Tuple = f"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(lowercase_ ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = 1, 0, a _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = 0, 1, m while va != 0: _lowerCAmelCase : Optional[int] = ua // va _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
44
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
78
0
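A quick sanity check for the extended-Euclid routine in the row above (an illustrative addition; `find_mod_inverse` is the reconstructed name used there):

# verify a * inv ≡ 1 (mod m) on a few coprime pairs
for a, m in [(3, 11), (7, 26), (17, 3120)]:
    inv = find_mod_inverse(a, m)  # requires gcd(a, m) == 1
    assert (a * inv) % m == 1, (a, m, inv)
print("find_mod_inverse passes basic checks")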
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' UpperCAmelCase__ = ['''image_processor''', '''tokenizer'''] UpperCAmelCase__ = '''LayoutLMv2ImageProcessor''' UpperCAmelCase__ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''') def __init__( self : Any , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Optional[Any]) ->Dict: '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase_ , ) A__ = kwargs.pop('''feature_extractor''') A__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(lowercase_ , lowercase_) def __call__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase__ : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : Any , ) ->BatchEncoding: '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''') if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''') if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''') # first, apply the image processor A__ = self.image_processor(images=lowercase_ , return_tensors=lowercase_) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_ , lowercase_): A__ = [text] # add batch dimension (as the image processor always adds a batch dimension) A__ = features['''words'''] A__ = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , 
pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # add pixel values A__ = features.pop('''pixel_values''') if return_overflowing_tokens is True: A__ = self.get_overflowing_images(lowercase_ , encoded_inputs['''overflow_to_sample_mapping''']) A__ = images return encoded_inputs def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any) ->Optional[Any]: '''simple docstring''' A__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(lowercase_) != len(lowercase_): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f""" {len(lowercase_)} and {len(lowercase_)}""") return images_with_overflow def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_) @property def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE ( self : int) ->Dict: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , ) return self.image_processor
14
"""simple docstring""" def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ): UpperCAmelCase = [] UpperCAmelCase = 0 for index, char in enumerate(lowercase_ ): if char == separator: split_words.append(string[last_index:index] ) UpperCAmelCase = index + 1 elif index + 1 == len(lowercase_ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
78
0
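Assuming the reconstructed `split` from the row above, two illustrative calls (an addition, not part of the dataset row):

print(split("apple#banana#cherry", "#"))  # ['apple', 'banana', 'cherry']
print(split("a,b,,c", ","))               # ['a', 'b', '', 'c']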
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class lowercase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' a : Any = ["vqvae"] def __init__( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, ) -> List[Any]: """simple docstring""" super().__init__() self.register_modules(unet=lowercase_, scheduler=lowercase_, mel=lowercase_, vqvae=lowercase_ ) def UpperCamelCase__ ( self ) -> int: """simple docstring""" return 50 if isinstance(self.scheduler, lowercase_ ) else 1000 @torch.no_grad() def __call__( self, __magic_name__ = 1, __magic_name__ = None, __magic_name__ = None, __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = None, __magic_name__ = None, __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = None, __magic_name__ = 0, __magic_name__ = None, __magic_name__ = None, __magic_name__=True, ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """simple docstring""" UpperCamelCase__ : Union[str, Any] = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase_ ) UpperCamelCase__ : int = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCamelCase__ : Dict = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCamelCase__ : Union[str, Any] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ), generator=lowercase_, device=self.device, ) UpperCamelCase__ : Any = noise UpperCamelCase__ : List[Any] = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase_, lowercase_ ) UpperCamelCase__ : Optional[int] = self.mel.audio_slice_to_image(lowercase_ ) UpperCamelCase__ : Optional[Any] = np.frombuffer(input_image.tobytes(), dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCamelCase__ : Tuple = (input_image / 255) * 2 - 1 UpperCamelCase__ : str = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCamelCase__ : Dict = self.vqvae.encode(torch.unsqueeze(lowercase_, 0 ) ).latent_dist.sample( generator=lowercase_ )[0] UpperCamelCase__ : Union[str, Any] = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCamelCase__ : Any = self.scheduler.add_noise(lowercase_, lowercase_, self.scheduler.timesteps[start_step - 1] ) UpperCamelCase__ : Any = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCamelCase__ : List[Any] = int(mask_start_secs * pixels_per_second ) UpperCamelCase__ : Union[str, Any] = int(mask_end_secs * pixels_per_second ) UpperCamelCase__ : Optional[int] = self.scheduler.add_noise(lowercase_, lowercase_, torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet, lowercase_ ): UpperCamelCase__ : List[Any] = self.unet(lowercase_, lowercase_, lowercase_ )['''sample'''] else: UpperCamelCase__ : Any = self.unet(lowercase_, lowercase_ )['''sample'''] if isinstance(self.scheduler, lowercase_ ): 
UpperCamelCase__ : Union[str, Any] = self.scheduler.step( model_output=lowercase_, timestep=lowercase_, sample=lowercase_, eta=lowercase_, generator=lowercase_, )['''prev_sample'''] else: UpperCamelCase__ : List[str] = self.scheduler.step( model_output=lowercase_, timestep=lowercase_, sample=lowercase_, generator=lowercase_, )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCamelCase__ : List[str] = mask[:, step, :, :mask_start] if mask_end > 0: UpperCamelCase__ : Optional[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCamelCase__ : str = 1 / self.vqvae.config.scaling_factor * images UpperCamelCase__ : Dict = self.vqvae.decode(lowercase_ )['''sample'''] UpperCamelCase__ : Union[str, Any] = (images / 2 + 0.5).clamp(0, 1 ) UpperCamelCase__ : Union[str, Any] = images.cpu().permute(0, 2, 3, 1 ).numpy() UpperCamelCase__ : Optional[Any] = (images * 255).round().astype('''uint8''' ) UpperCamelCase__ : Dict = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase_, mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCamelCase__ : str = [self.mel.image_to_audio(lowercase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ), **ImagePipelineOutput(lowercase_ ) ) @torch.no_grad() def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = 50 ) -> np.ndarray: """simple docstring""" assert isinstance(self.scheduler, lowercase_ ) self.scheduler.set_timesteps(lowercase_ ) UpperCamelCase__ : str = np.array( [np.frombuffer(image.tobytes(), dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCamelCase__ : Dict = (sample / 255) * 2 - 1 UpperCamelCase__ : List[str] = torch.Tensor(lowercase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,) ) ): UpperCamelCase__ : str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCamelCase__ : Union[str, Any] = self.scheduler.alphas_cumprod[t] UpperCamelCase__ : Optional[int] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCamelCase__ : Dict = 1 - alpha_prod_t UpperCamelCase__ : Dict = self.unet(lowercase_, lowercase_ )['''sample'''] UpperCamelCase__ : Tuple = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCamelCase__ : Tuple = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCamelCase__ : Tuple = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCamelCase__ ( __magic_name__, __magic_name__, __magic_name__ ) -> torch.Tensor: """simple docstring""" UpperCamelCase__ : Tuple = acos(torch.dot(torch.flatten(lowercase_ ), torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
201
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) snake_case_ = logging.getLogger(__name__) def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = git.Repo(search_parent_directories=lowercase_ ) UpperCAmelCase = { 'repo_id': str(lowercase_ ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def _lowerCAmelCase ( lowercase_ ): if params.n_gpu <= 0: UpperCAmelCase = 0 UpperCAmelCase = -1 UpperCAmelCase = True UpperCAmelCase = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 UpperCAmelCase = int(os.environ['WORLD_SIZE'] ) UpperCAmelCase = int(os.environ['N_GPU_NODE'] ) UpperCAmelCase = int(os.environ['RANK'] ) # number of nodes / node ID UpperCAmelCase = params.world_size // params.n_gpu_per_node UpperCAmelCase = params.global_rank // params.n_gpu_per_node UpperCAmelCase = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 1 UpperCAmelCase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode UpperCAmelCase = params.node_id == 0 and params.local_rank == 0 UpperCAmelCase = params.n_nodes > 1 # summary UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' , backend='nccl' , ) def _lowerCAmelCase ( lowercase_ ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
78
0
"""simple docstring""" from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata _snake_case = '' if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'): class UpperCamelCase ( tr.AbstractTransform ): def __init__( self : Optional[Any] , UpperCAmelCase__ : str = " " ) -> str: _a : Dict = sentence_delimiter def _lowercase ( self : Any , UpperCAmelCase__ : str ) -> Any: return list(lowercase_ ) def _lowercase ( self : List[str] , UpperCAmelCase__ : List[str] ) -> Optional[int]: _a : Tuple = [] for sent_idx, sentence in enumerate(lowercase_ ): chars.extend(self.process_string(lowercase_ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase_ ) - 1: chars.append(self.sentence_delimiter ) return chars _snake_case = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: _snake_case = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) _snake_case = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' _snake_case = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n' _snake_case = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def _lowercase ( self : List[Any] ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", """https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""", ] , ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any=False ) -> int: if concatenate_texts: return jiwer.compute_measures( lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )["wer"] _a : Tuple = 0 _a : Union[str, Any] = 0 for prediction, reference in zip(lowercase_ , lowercase_ ): _a : Optional[int] = jiwer.compute_measures( lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
294
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort snake_case_ = """1""" snake_case_ = """0""" snake_case_ = """1""" snake_case_ = ort.SessionOptions() snake_case_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print("""Create inference session...""") snake_case_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""] snake_case_ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider) snake_case_ = ort.RunOptions() snake_case_ = 128 snake_case_ = 1 snake_case_ = np.ones((batch, sequence), dtype=np.intaa) snake_case_ = np.ones((batch, sequence), dtype=np.intaa) snake_case_ = np.ones((batch, sequence), dtype=np.intaa) print("""Warm up phase...""") sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Start inference...""") snake_case_ = time.time() snake_case_ = 2000 snake_case_ = {} for iter in range(max_iters): snake_case_ = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
78
0
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=64 , a_=5 , a_=4 , a_=64 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : Any = batch_size __snake_case : Union[str, Any] = seq_length __snake_case : Optional[Any] = is_training __snake_case : Optional[int] = use_input_mask __snake_case : Any = use_token_type_ids __snake_case : Any = use_labels __snake_case : Tuple = vocab_size __snake_case : Dict = hidden_size __snake_case : int = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : List[Any] = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : str = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : List[Any] = initializer_range __snake_case : Any = num_labels __snake_case : Union[str, Any] = num_choices __snake_case : Any = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[int] = None __snake_case : List[str] = None __snake_case : int = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : int = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : str = MPNetModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() __snake_case : str = model(lowercase_ , lowercase_ ) __snake_case : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = MPNetForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() __snake_case : Any = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : str = MPNetForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() __snake_case : str = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : int = MPNetForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() __snake_case : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[str] = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = self.num_labels __snake_case : List[Any] = MPNetForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() __snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = MPNetModelTester(self ) __snake_case : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_mpnet_model(*lowercase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) __snake_case : Tuple = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Dict = model(lowercase_ )[0] __snake_case : int = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , lowercase_ ) __snake_case : Tuple = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
102
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case_ = logging.get_logger(__name__) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = ["""pixel_values"""] def __init__( self :int , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :bool = True , lowercase_ :Union[int, float] = 1 / 2_55 , lowercase_ :bool = True , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :bool = True , **lowercase_ :Union[str, Any] , ) -> None: super().__init__(**lowercase_ ) UpperCAmelCase = size if size is not None else {'height': 3_84, 'width': 3_84} UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase = do_convert_rgb def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Any , ) -> np.ndarray: UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) UpperCAmelCase = (size['height'], size['width']) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :np.ndarray , lowercase_ :Union[int, float] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[int] , ) -> int: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :Union[float, List[float]] , lowercase_ :Union[float, List[float]] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[Any] , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :ImageInput , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Dict[str, int]] = None , lowercase_ :PILImageResampling = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[float] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[str, TensorType]] = None , lowercase_ :bool = None , lowercase_ :ChannelDimension = ChannelDimension.FIRST , **lowercase_ :Tuple , ) -> PIL.Image.Image: UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ ) UpperCAmelCase = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. 
UpperCAmelCase = [to_numpy_array(lowercase_ ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] UpperCAmelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] UpperCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase_ ) return encoded_outputs
78
0
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
103
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """beit""" def __init__( self :List[str] , lowercase_ :List[Any]=81_92 , lowercase_ :str=7_68 , lowercase_ :List[str]=12 , lowercase_ :Optional[int]=12 , lowercase_ :Dict=30_72 , lowercase_ :Tuple="gelu" , lowercase_ :Any=0.0 , lowercase_ :Optional[int]=0.0 , lowercase_ :Dict=0.02 , lowercase_ :int=1E-12 , lowercase_ :List[Any]=2_24 , lowercase_ :Dict=16 , lowercase_ :List[Any]=3 , lowercase_ :List[str]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :str=True , lowercase_ :List[str]=[3, 5, 7, 11] , lowercase_ :Optional[int]=[1, 2, 3, 6] , lowercase_ :str=True , lowercase_ :int=0.4 , lowercase_ :Union[str, Any]=2_56 , lowercase_ :int=1 , lowercase_ :Tuple=False , lowercase_ :Optional[int]=2_55 , **lowercase_ :str , ) -> Any: super().__init__(**lowercase_ ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = use_mask_token UpperCAmelCase = use_absolute_position_embeddings UpperCAmelCase = use_relative_position_bias UpperCAmelCase = use_shared_relative_position_bias UpperCAmelCase = layer_scale_init_value UpperCAmelCase = drop_path_rate UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase = out_indices UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase = use_auxiliary_head UpperCAmelCase = auxiliary_loss_weight UpperCAmelCase = auxiliary_channels UpperCAmelCase = auxiliary_num_convs UpperCAmelCase = auxiliary_concat_input UpperCAmelCase = semantic_loss_ignore_index class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = version.parse("""1.11""" ) @property def UpperCAmelCase__ ( self :Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCAmelCase__ ( self :Tuple ) -> float: return 1E-4
78
0
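A small cross-check (an illustrative addition) that the naive and optimized generators in the row above agree:

for n in range(10):
    assert generate_pascal_triangle(n) == generate_pascal_triangle_optimized(n)
print(generate_pascal_triangle(5)[-1])  # [1, 4, 6, 4, 1]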
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE__ = logging.getLogger() def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' lowerCAmelCase = """\n""".join(lowercase_ ) Path(lowercase_ ).open("""w""" ).writelines(lowercase_ ) SCREAMING_SNAKE_CASE__ = "patrickvonplaten/t5-tiny-random" SCREAMING_SNAKE_CASE__ = "sshleifer/bart-tiny-random" SCREAMING_SNAKE_CASE__ = "sshleifer/tiny-mbart" SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase ( SCREAMING_SNAKE_CASE_ ): def _snake_case ( self , lowercase ) -> Union[str, Any]: lowerCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" lowerCAmelCase = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() lowerCAmelCase = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(lowercase_ , lowercase_ ) lowerCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) lowerCAmelCase = """translation_en_to_de""" if model == T5_TINY else """summarization""" lowerCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(lowercase_ , """argv""" , lowercase_ ): run_generate() assert Path(lowercase_ ).exists() # os.remove(Path(output_file_name)) def _snake_case ( self ) -> List[Any]: self.run_eval_tester(lowercase_ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _snake_case ( self , lowercase ) -> Tuple: self.run_eval_tester(lowercase_ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _snake_case ( self , lowercase ) -> Optional[int]: lowerCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" lowerCAmelCase = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() lowerCAmelCase = { """en""": ["""Machine learning is great, isn\'t it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } lowerCAmelCase = Path(self.get_auto_remove_tmp_dir() ) lowerCAmelCase = str(tmp_dir / """scores.json""" ) lowerCAmelCase = str(tmp_dir / """val.target""" ) _dump_articles(lowercase_ , text["""en"""] ) _dump_articles(lowercase_ , text["""de"""] ) lowerCAmelCase = """translation_en_to_de""" if model == T5_TINY else """summarization""" lowerCAmelCase = f'\n run_eval_search.py\n {model}\n {str(lowercase_ )}\n {str(lowercase_ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(lowercase_ , """argv""" , lowercase_ ): with CaptureStdout() as cs: run_search() lowerCAmelCase = [""" num_beams | length_penalty""", model, """Best score args"""] lowerCAmelCase = ["""Info"""] if "translation" in task: 
expected_strings.append("""bleu""" ) else: expected_strings.extend(lowercase_ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowercase_ ).exists() os.remove(Path(lowercase_ ) )
46
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available snake_case_ = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ : Any = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt') lowerCamelCase_ : int = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) lowerCamelCase_ : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" with open(lowercase_ , 'rb' ) as f: A_ : Dict = Image.open(lowercase_ ) return im.convert('RGB' ) @dataclass class _UpperCAmelCase : '''simple docstring''' lowercase_ : Optional[int] = field( default=SCREAMING_SNAKE_CASE_ , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) lowercase_ : Optional[int] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase_ : Tuple = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A folder containing the training data."""} ) lowercase_ : int = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A folder containing the validation data."""} ) lowercase_ : Dict = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) lowercase_ : Any = field( default=SCREAMING_SNAKE_CASE_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase_ : List[str] = field( default=SCREAMING_SNAKE_CASE_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def lowerCamelCase_ ( self ): """simple docstring""" if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( 'You must specify either a dataset name from the hub or a train and/or validation directory.' 
) @dataclass class _UpperCAmelCase : '''simple docstring''' lowercase_ : str = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) lowercase_ : Optional[Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(SCREAMING_SNAKE_CASE_ )} , ) lowercase_ : int = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowercase_ : List[Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) lowercase_ : Optional[Any] = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowercase_ : Dict = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name or path of preprocessor config."""} ) lowercase_ : Union[str, Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowercase_ : int = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : Tuple = torch.stack([example['pixel_values'] for example in examples] ) A_ : int = torch.tensor([example['labels'] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def UpperCAmelCase__ ( ): """simple docstring""" A_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A_ , A_ , A_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A_ , A_ , A_ : List[str] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_image_classification' , lowercase_ , lowercase_ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A_ : List[str] = training_args.get_process_log_level() logger.setLevel(lowercase_ ) transformers.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
A_ : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A_ : List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: A_ : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , ) else: A_ : Optional[Any] = {} if data_args.train_dir is not None: A_ : Optional[int] = os.path.join(data_args.train_dir , '**' ) if data_args.validation_dir is not None: A_ : str = os.path.join(data_args.validation_dir , '**' ) A_ : Tuple = load_dataset( 'imagefolder' , data_files=lowercase_ , cache_dir=model_args.cache_dir , task='image-classification' , ) # If we don't have a validation split, split off a percentage of train as validation. A_ : str = None if 'validation' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase_ ) and data_args.train_val_split > 0.0: A_ : Union[str, Any] = dataset['train'].train_test_split(data_args.train_val_split ) A_ : Any = split['train'] A_ : Any = split['test'] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. A_ : Dict = dataset['train'].features['labels'].names A_ , A_ : Optional[Any] = {}, {} for i, label in enumerate(lowercase_ ): A_ : Dict = str(lowercase_ ) A_ : Optional[Any] = label # Load the accuracy metric from the datasets package A_ : Union[str, Any] = evaluate.load('accuracy' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(_UpperCAmelCase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) A_ : int = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A_ : str = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) A_ : int = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: A_ : List[str] = image_processor.size['shortest_edge'] else: A_ : List[str] = (image_processor.size['height'], image_processor.size['width']) A_ : Dict = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) A_ : Union[str, Any] = Compose( [ RandomResizedCrop(lowercase_ ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) A_ : Tuple = Compose( [ Resize(lowercase_ ), CenterCrop(lowercase_ ), ToTensor(), normalize, ] ) def train_transforms(_UpperCAmelCase ): A_ : List[str] = [ _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image'] ] return example_batch def val_transforms(_UpperCAmelCase ): A_ : Optional[Any] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: A_ : List[Any] = ( dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(lowercase_ ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: A_ : Union[str, Any] = ( dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(lowercase_ ) # Initalize our trainer A_ : str = Trainer( model=lowercase_ , args=lowercase_ , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , data_collator=lowercase_ , ) # Training if training_args.do_train: A_ : int = None if training_args.resume_from_checkpoint is not None: A_ : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: A_ : str = last_checkpoint A_ : List[Any] = trainer.train(resume_from_checkpoint=lowercase_ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A_ : Optional[int] = trainer.evaluate() trainer.log_metrics('eval' , lowercase_ ) 
trainer.save_metrics('eval' , lowercase_ ) # Write model card and (optionally) push to hub A_ : Dict = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'image-classification', 'dataset': data_args.dataset_name, 'tags': ['image-classification', 'vision'], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase_ ) else: trainer.create_model_card(**lowercase_ ) if __name__ == "__main__": main()
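# --- Illustrative sketch (editor's addition). ---
# The collate function in the script above stacks per-example dicts into batch
# tensors for the Trainer. A self-contained demo of the same pattern on dummy data:
import torch

def collate_fn(examples):
    # Stack images into a [batch, channels, height, width] tensor and labels into a vector.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}

batch = collate_fn([
    {"pixel_values": torch.rand(3, 224, 224), "labels": 0},
    {"pixel_values": torch.rand(3, 224, 224), "labels": 1},
])
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 224, 224])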
286
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = fname.split(os.path.sep )[-1] return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0] class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :List[str]=None , lowercase_ :Optional[Any]=None ) -> Optional[int]: UpperCAmelCase = file_names UpperCAmelCase = image_transform UpperCAmelCase = label_to_id def __len__( self :Optional[int] ) -> Optional[Any]: return len(self.file_names ) def __getitem__( self :int , lowercase_ :str ) -> List[str]: UpperCAmelCase = self.file_names[idx] UpperCAmelCase = PIL.Image.open(lowercase_ ) UpperCAmelCase = raw_image.convert('RGB' ) if self.image_transform is not None: UpperCAmelCase = self.image_transform(lowercase_ ) UpperCAmelCase = extract_label(lowercase_ ) if self.label_to_id is not None: UpperCAmelCase = self.label_to_id[label] return {"image": image, "label": label} def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Initialize accelerator if args.with_tracking: UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase = config['lr'] UpperCAmelCase = int(config['num_epochs'] ) UpperCAmelCase = int(config['seed'] ) UpperCAmelCase = int(config['batch_size'] ) UpperCAmelCase = config['image_size'] if not isinstance(lowercase_ , (list, tuple) ): UpperCAmelCase = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": UpperCAmelCase = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): UpperCAmelCase = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: UpperCAmelCase = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0] accelerator.init_trackers(lowercase_ , lowercase_ ) # Grab all the image filenames UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names] UpperCAmelCase = list(set(lowercase_ ) ) id_to_label.sort() UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )} # Set the seed before splitting the data. 
np.random.seed(lowercase_ ) torch.manual_seed(lowercase_ ) torch.cuda.manual_seed_all(lowercase_ ) # Split our filenames between train and validation UpperCAmelCase = np.random.permutation(len(lowercase_ ) ) UpperCAmelCase = int(0.8 * len(lowercase_ ) ) UpperCAmelCase = random_perm[:cut] UpperCAmelCase = random_perm[cut:] # For training we use a simple RandomResizedCrop UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] ) UpperCAmelCase = PetsDataset( [file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ ) # For evaluation, we use a deterministic Resize UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] ) UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ ) # Instantiate dataloaders. UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): UpperCAmelCase = False for param in model.get_classifier().parameters(): UpperCAmelCase = True # We normalize the batches of images to be a bit faster. UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase = 0 # We also need to keep track of the starting epoch so files are named properly UpperCAmelCase = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` UpperCAmelCase = os.path.splitext(lowercase_ )[0] if "epoch" in training_difference: UpperCAmelCase = int(training_difference.replace('epoch_' , '' ) ) + 1 UpperCAmelCase = None else: UpperCAmelCase = int(training_difference.replace('step_' , '' ) ) UpperCAmelCase = resume_step // len(lowercase_ ) resume_step -= starting_epoch * len(lowercase_ ) # Now we train the model for epoch in range(lowercase_ , lowercase_ ): model.train() if args.with_tracking: UpperCAmelCase = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step UpperCAmelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader UpperCAmelCase = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch['image'] - mean) / std UpperCAmelCase = model(lowercase_ ) UpperCAmelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , lowercase_ ) accelerator.save_state(lowercase_ ) model.eval() UpperCAmelCase = 0 UpperCAmelCase = 0 for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch['image'] - mean) / std with torch.no_grad(): UpperCAmelCase = model(lowercase_ ) UpperCAmelCase = outputs.argmax(dim=-1 ) UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['label']) ) UpperCAmelCase = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() UpperCAmelCase = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { 'accuracy': 100 * eval_metric, 'train_loss': total_loss.item() / len(lowercase_ ), 'epoch': epoch, } , step=lowercase_ , ) if checkpointing_steps == "epoch": UpperCAmelCase = F"""epoch_{epoch}""" if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , lowercase_ ) accelerator.save_state(lowercase_ ) if args.with_tracking: accelerator.end_training() def _lowerCAmelCase ( ): UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=lowercase_ , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=lowercase_ , default=lowercase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=lowercase_ , default=lowercase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=lowercase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=lowercase_ , default=lowercase_ , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=lowercase_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
78
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Tuple = tempfile.mkdtemp() UpperCAmelCase : Union[str, Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) UpperCAmelCase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } UpperCAmelCase : Any = os.path.join(self.tmpdirname , lowercase_ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase : Tuple = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_tokenizer() UpperCAmelCase : int = self.get_rust_tokenizer() UpperCAmelCase : int = self.get_image_processor() UpperCAmelCase : Dict = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ ) UpperCAmelCase : Tuple = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase : Dict = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowercase_ ) self.assertIsInstance(processor_fast.tokenizer , lowercase_ ) 
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowercase_ ) self.assertIsInstance(processor_fast.image_processor , lowercase_ ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : int = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) UpperCAmelCase : Any = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) UpperCAmelCase : List[Any] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_image_processor() UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase : Dict = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) UpperCAmelCase : Any = self.prepare_image_inputs() UpperCAmelCase : Tuple = image_processor(lowercase_ , return_tensors="""np""" ) UpperCAmelCase : Tuple = processor(images=lowercase_ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[Any] = self.get_image_processor() UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : List[str] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) UpperCAmelCase : Optional[int] = """lower newer""" UpperCAmelCase : int = processor(text=lowercase_ ) UpperCAmelCase : Optional[int] = tokenizer(lowercase_ , padding="""max_length""" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : Any = self.get_image_processor() UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : Optional[int] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) UpperCAmelCase : List[str] = """lower newer""" UpperCAmelCase : List[Any] = self.prepare_image_inputs() UpperCAmelCase : Tuple = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_image_processor() UpperCAmelCase : Optional[Any] = self.get_tokenizer() UpperCAmelCase : List[str] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) UpperCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase : Optional[Any] = processor.batch_decode(lowercase_ ) UpperCAmelCase : List[str] = 
tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_image_processor() UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase : Optional[Any] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) UpperCAmelCase : int = """lower newer""" UpperCAmelCase : int = self.prepare_image_inputs() UpperCAmelCase : Dict = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
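# --- Illustrative usage (editor's addition). ---
# A minimal sketch of the joint text+image path the tests above exercise;
# assumes the released `transformers` package and the `kakaobrain/align-base`
# checkpoint.
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
inputs = processor(text="a photo of a cat", images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']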
109
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = list(range(len(lowercase_ ) ) ) UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )] index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ ) UpperCAmelCase = 0 UpperCAmelCase = [0] * len(lowercase_ ) for i in index: if weight[i] <= capacity: UpperCAmelCase = 1 max_value += value[i] capacity -= weight[i] else: UpperCAmelCase = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
78
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : '''simple docstring''' lowerCAmelCase_ : int = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) lowerCAmelCase_ : List[Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowerCAmelCase_ : Optional[Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) lowerCAmelCase_ : Optional[Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) lowerCAmelCase_ : Union[str, Any] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether tp freeze the encoder."""} ) lowerCAmelCase_ : str = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase_ : '''simple docstring''' lowerCAmelCase_ : List[str] = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) lowerCAmelCase_ : str = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) lowerCAmelCase_ : int = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) lowerCAmelCase_ : Optional[Any] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) lowerCAmelCase_ : Any = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) lowerCAmelCase_ : Union[str, Any] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) lowerCAmelCase_ : List[Any] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) lowerCAmelCase_ : List[str] = field(default=-1 , metadata={"""help""": """# validation examples. 
-1 means use all."""} ) lowerCAmelCase_ : Optional[Any] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) lowerCAmelCase_ : Tuple = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Source language id for translation."""} ) lowerCAmelCase_ : Optional[int] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Target language id for translation."""} ) lowerCAmelCase_ : Dict = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """# num_beams to use for evaluation."""} ) lowerCAmelCase_ : Union[str, Any] = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(lowercase_ , os.path.join(lowercase_ , F'''{split}_results.json''' ) ) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses() check_output_dir(lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowercase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase__ = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase_ , lowercase_ , lowercase_ ): assert hasattr(lowercase_ , lowercase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) ) UpperCAmelCase__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCAmelCase__ = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase__ = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCAmelCase__ = SeqaSeqDataset # Get datasets UpperCAmelCase__ = ( dataset_class( lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) UpperCAmelCase__ = ( dataset_class( lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCAmelCase__ = ( dataset_class( lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCAmelCase__ = ( build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None ) UpperCAmelCase__ = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator( lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) UpperCAmelCase__ = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) UpperCAmelCase__ = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCAmelCase__ = train_result.metrics UpperCAmelCase__ = data_args.n_train 
trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) UpperCAmelCase__ = trainer.evaluate(metric_key_prefix="""val""" ) UpperCAmelCase__ = data_args.n_val UpperCAmelCase__ = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) UpperCAmelCase__ = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" ) UpperCAmelCase__ = test_output.metrics UpperCAmelCase__ = data_args.n_test if trainer.is_world_process_zero(): UpperCAmelCase__ = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.predict_with_generate: UpperCAmelCase__ = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) UpperCAmelCase__ = lmap(str.strip , lowercase_ ) write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' main() if __name__ == "__main__": main()
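# --- Illustrative invocation (editor's addition). ---
# A hedged sketch of launching a script with this argument surface; the script
# filename and data paths are hypothetical, while the flags come from the
# dataclasses and Seq2SeqTrainingArguments above.
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/bart-tiny-random \
#       --data_dir ./cnn_dm \
#       --output_dir ./out \
#       --do_train --do_eval --do_predict \
#       --task summarization \
#       --n_train 100 --n_val 100 --n_test 100 \
#       --predict_with_generate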
346
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING snake_case_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Any , *lowercase_ :str , **lowercase_ :List[Any] ) -> Union[str, Any]: super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any=None , lowercase_ :Optional[int]=None , lowercase_ :Tuple=None , **lowercase_ :Tuple ) -> Dict: UpperCAmelCase , UpperCAmelCase = {}, {} if padding is not None: UpperCAmelCase = padding if truncation is not None: UpperCAmelCase = truncation if top_k is not None: UpperCAmelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self :List[Any] , lowercase_ :Union["Image.Image", str] , lowercase_ :str = None , **lowercase_ :Union[str, Any] ) -> Union[str, Any]: if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = {'image': image, 'question': question} else: UpperCAmelCase = image UpperCAmelCase = super().__call__(lowercase_ , **lowercase_ ) return results def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :int=False , lowercase_ :Optional[int]=False ) -> Union[str, Any]: UpperCAmelCase = load_image(inputs['image'] ) UpperCAmelCase = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> Any: UpperCAmelCase = self.model(**lowercase_ ) return model_outputs def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple , lowercase_ :List[Any]=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: UpperCAmelCase = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase = model_outputs.logits.sigmoid()[0] UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase_ ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase = scores.tolist() UpperCAmelCase = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
78
0
'''simple docstring'''

from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
35
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """transfo-xl""" __UpperCamelCase = ["""mems"""] __UpperCamelCase = { """n_token""": """vocab_size""", """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self :List[Any] , lowercase_ :Optional[int]=26_77_35 , lowercase_ :Union[str, Any]=[2_00_00, 4_00_00, 20_00_00] , lowercase_ :List[Any]=10_24 , lowercase_ :Optional[Any]=10_24 , lowercase_ :Tuple=16 , lowercase_ :Tuple=64 , lowercase_ :Any=40_96 , lowercase_ :int=4 , lowercase_ :List[str]=False , lowercase_ :Union[str, Any]=18 , lowercase_ :Optional[Any]=16_00 , lowercase_ :Dict=10_00 , lowercase_ :Optional[int]=True , lowercase_ :Tuple=True , lowercase_ :Dict=0 , lowercase_ :Tuple=-1 , lowercase_ :Optional[int]=True , lowercase_ :Optional[int]=0.1 , lowercase_ :str=0.0 , lowercase_ :List[str]=True , lowercase_ :int="normal" , lowercase_ :Dict=0.01 , lowercase_ :Optional[Any]=0.01 , lowercase_ :Dict=0.02 , lowercase_ :Tuple=1E-5 , lowercase_ :str=0 , **lowercase_ :Tuple , ) -> List[str]: UpperCAmelCase = vocab_size UpperCAmelCase = [] self.cutoffs.extend(lowercase_ ) if proj_share_all_but_first: UpperCAmelCase = [False] + [True] * len(self.cutoffs ) else: UpperCAmelCase = [False] + [False] * len(self.cutoffs ) UpperCAmelCase = d_model UpperCAmelCase = d_embed UpperCAmelCase = d_head UpperCAmelCase = d_inner UpperCAmelCase = div_val UpperCAmelCase = pre_lnorm UpperCAmelCase = n_layer UpperCAmelCase = n_head UpperCAmelCase = mem_len UpperCAmelCase = same_length UpperCAmelCase = attn_type UpperCAmelCase = clamp_len UpperCAmelCase = sample_softmax UpperCAmelCase = adaptive UpperCAmelCase = dropout UpperCAmelCase = dropatt UpperCAmelCase = untie_r UpperCAmelCase = init UpperCAmelCase = init_range UpperCAmelCase = proj_init_std UpperCAmelCase = init_std UpperCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowercase_ , **lowercase_ ) @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any: # Message copied from Transformer-XL documentation logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any ) -> Tuple: # Message copied from Transformer-XL documentation raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
78
0
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class __A : def __init__( self , a__ , a__ ): if len(lowercase_ ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) _lowerCAmelCase : List[str] = list(lowercase_ ) _lowerCAmelCase : Any = degree def __add__( self , a__ ): if self.degree > polynomial_a.degree: _lowerCAmelCase : Optional[int] = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , lowercase_ ) else: _lowerCAmelCase : Optional[Any] = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , lowercase_ ) def __sub__( self , a__ ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self , a__ ): _lowerCAmelCase : List[str] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , lowercase_ ) def __A ( self , a__ ): _lowerCAmelCase : List[str] = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self ): _lowerCAmelCase : Optional[int] = """""" for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowercase_ ) return polynomial def __repr__( self ): return self.__str__() def __A ( self ): _lowerCAmelCase : List[str] = [0] * self.degree for i in range(self.degree ): _lowerCAmelCase : Union[str, Any] = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , lowercase_ ) def __A ( self , a__ = 0 ): _lowerCAmelCase : List[str] = [0] * (self.degree + 2) _lowerCAmelCase : List[str] = constant for i in range(self.degree + 1 ): _lowerCAmelCase : int = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , lowercase_ ) def __eq__( self , a__ ): if not isinstance(lowercase_ , lowercase_ ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self , a__ ): return not self.__eq__(lowercase_ )
44
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ): UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: UpperCAmelCase = F"""{olid} is not a valid Open Library olid""" raise ValueError(lowercase_ ) return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json() def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} UpperCAmelCase = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] UpperCAmelCase = data['First sentence']['value'] for key, value in data.items(): if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = ', '.join(lowercase_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
78
0
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Stacks (x, y) pixel coordinates for every pixel in the image."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # carried over so the dataclass can be constructed (missing in the flattened original)
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
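# Usage sketch for the camera above (shapes follow the definitions; values are
# illustrative):
#
#   cameras = create_pan_cameras(size=64)
#   rays = cameras.camera_rays              # [1, 20 * 64 * 64, 2, 3]
#   origins, directions = rays[..., 0, :], rays[..., 1, :]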
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[str] , lowercase_ :int , lowercase_ :Optional[int]=None , lowercase_ :List[str]=None ) -> str: UpperCAmelCase = data UpperCAmelCase = previous UpperCAmelCase = next_node def __str__( self :Optional[Any] ) -> str: return f"""{self.data}""" def UpperCAmelCase__ ( self :int ) -> int: return self.data def UpperCAmelCase__ ( self :List[str] ) -> Any: return self.next def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]: return self.previous class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Optional[Any] ) -> str: UpperCAmelCase = head def __iter__( self :List[str] ) -> List[str]: return self def UpperCAmelCase__ ( self :int ) -> Any: if not self.current: raise StopIteration else: UpperCAmelCase = self.current.get_data() UpperCAmelCase = self.current.get_next() return value class A_ : """simple docstring""" def __init__( self :Union[str, Any] ) -> List[Any]: UpperCAmelCase = None # First node in list UpperCAmelCase = None # Last node in list def __str__( self :List[Any] ) -> Optional[Any]: UpperCAmelCase = self.head UpperCAmelCase = [] while current is not None: nodes.append(current.get_data() ) UpperCAmelCase = current.get_next() return " ".join(str(lowercase_ ) for node in nodes ) def __contains__( self :str , lowercase_ :int ) -> str: UpperCAmelCase = self.head while current: if current.get_data() == value: return True UpperCAmelCase = current.get_next() return False def __iter__( self :Tuple ) -> Dict: return LinkedListIterator(self.head ) def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]: if self.head: return self.head.get_data() return None def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: if self.tail: return self.tail.get_data() return None def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node ) -> None: if self.head is None: UpperCAmelCase = node UpperCAmelCase = node else: self.insert_before_node(self.head , lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :Node ) -> None: if self.head is None: self.set_head(lowercase_ ) else: self.insert_after_node(self.tail , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int ) -> None: UpperCAmelCase = Node(lowercase_ ) if self.head is None: self.set_head(lowercase_ ) else: self.set_tail(lowercase_ ) def UpperCAmelCase__ ( self :int , lowercase_ :Node , lowercase_ :Node ) -> None: UpperCAmelCase = node UpperCAmelCase = node.previous if node.get_previous() is None: UpperCAmelCase = node_to_insert else: UpperCAmelCase = node_to_insert UpperCAmelCase = node_to_insert def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node , lowercase_ :Node ) -> None: UpperCAmelCase = node UpperCAmelCase = node.next if node.get_next() is None: UpperCAmelCase = node_to_insert else: UpperCAmelCase = node_to_insert UpperCAmelCase = node_to_insert def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = 1 UpperCAmelCase = Node(lowercase_ ) UpperCAmelCase = self.head while node: if current_position == position: self.insert_before_node(lowercase_ , lowercase_ ) return current_position += 1 UpperCAmelCase = node.next self.insert_after_node(self.tail , lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int ) -> Node: UpperCAmelCase = self.head while node: if node.get_data() == item: return node UpperCAmelCase = node.get_next() raise Exception('Node not found' ) def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] ) -> Dict: if 
(node := self.get_node(lowercase_ )) is not None: if node == self.head: UpperCAmelCase = self.head.get_next() if node == self.tail: UpperCAmelCase = self.tail.get_previous() self.remove_node_pointers(lowercase_ ) @staticmethod def UpperCAmelCase__ ( lowercase_ :Node ) -> None: if node.get_next(): UpperCAmelCase = node.previous if node.get_previous(): UpperCAmelCase = node.next UpperCAmelCase = None UpperCAmelCase = None def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: return self.head is None def _lowerCAmelCase ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
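# Usage sketch for the list above:
#
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert(value)
#   print(ll)            # "1 2 3"
#   print(2 in ll)       # True
#   ll.delete_value(2)
#   print(ll)            # "1 3"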
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right UpperCAmelCase_ = 25_0004 UpperCAmelCase_ = 25_0020 @require_sentencepiece @require_tokenizers class lowercase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' a : Dict = MBartaaTokenizer a : List[Any] = MBartaaTokenizerFast a : int = True a : List[str] = True def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase__ : List[str] = MBartaaTokenizer(lowercase_, src_lang='''en_XX''', tgt_lang='''ro_RO''', keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : List[Any] = '''<s>''' UpperCamelCase__ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ), lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ), lowercase_ ) def UpperCamelCase__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''<mask>''' ) self.assertEqual(len(lowercase_ ), 1054 ) def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1054 ) def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" UpperCamelCase__ : Dict = MBartaaTokenizer(lowercase_, src_lang='''en_XX''', tgt_lang='''ro_RO''', keep_accents=lowercase_ ) UpperCamelCase__ : str = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase_, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) UpperCamelCase__ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase_, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) UpperCamelCase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) UpperCamelCase__ : List[str] = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', 
SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def UpperCamelCase__ ( self ) -> Any: """simple docstring""" # fmt: off UpperCamelCase__ : Optional[int] = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_, model_name='''facebook/mbart-large-50''', revision='''d3913889c59cd5c9e456b269c376325eabad57e2''', ) def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCamelCase__ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_, **lowercase_ ) UpperCamelCase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_, **lowercase_ ) UpperCamelCase__ : Tuple = tempfile.mkdtemp() UpperCamelCase__ : int = tokenizer_r.save_pretrained(lowercase_ ) UpperCamelCase__ : int = 
tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCamelCase__ : Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowercase_, lowercase_ ) # Checks everything loads correctly in the same way UpperCamelCase__ : Any = tokenizer_r.from_pretrained(lowercase_ ) UpperCamelCase__ : Tuple = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_, lowercase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=True UpperCamelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCamelCase__ : Any = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_ ) UpperCamelCase__ : Any = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files self.assertSequenceEqual(lowercase_, lowercase_ ) # Checks everything loads correctly in the same way UpperCamelCase__ : int = tokenizer_r.from_pretrained(lowercase_ ) UpperCamelCase__ : Optional[Any] = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_, lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=False UpperCamelCase__ : Optional[int] = tempfile.mkdtemp() UpperCamelCase__ : Any = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_ ) UpperCamelCase__ : Dict = tokenizer_p.save_pretrained(lowercase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCamelCase__ : Any = tokenizer_r.from_pretrained(lowercase_ ) UpperCamelCase__ : str = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_, lowercase_ ) ) shutil.rmtree(lowercase_ ) @require_torch @require_sentencepiece @require_tokenizers class lowercase__ ( unittest.TestCase ): '''simple docstring''' a : str = "facebook/mbart-large-50-one-to-many-mmt" a : Dict = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a : Union[str, Any] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a : Union[str, Any] = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2] @classmethod def UpperCamelCase__ ( cls ) -> Optional[int]: """simple docstring""" UpperCamelCase__ : List[str] = MBartaaTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en_XX''', 
tgt_lang='''ro_RO''' ) UpperCamelCase__ : int = 1 return cls def UpperCamelCase__ ( self ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''], 250038 ) def UpperCamelCase__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase__ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowercase_ ) def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" self.assertIn(lowercase_, self.tokenizer.all_special_ids ) UpperCamelCase__ : Optional[Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCamelCase__ : Optional[Any] = self.tokenizer.decode(lowercase_, skip_special_tokens=lowercase_ ) UpperCamelCase__ : Optional[int] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_, lowercase_ ) self.assertNotIn(self.tokenizer.eos_token, lowercase_ ) def UpperCamelCase__ ( self ) -> str: """simple docstring""" UpperCamelCase__ : Optional[int] = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0], lowercase_ ) UpperCamelCase__ : Optional[int] = 10 UpperCamelCase__ : Dict = self.tokenizer(lowercase_, max_length=lowercase_, truncation=lowercase_ ).input_ids[0] self.assertEqual(ids[0], lowercase_ ) self.assertEqual(ids[-1], 2 ) self.assertEqual(len(lowercase_ ), lowercase_ ) def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [250053, 250001] ) def UpperCamelCase__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : Optional[int] = tempfile.mkdtemp() UpperCamelCase__ : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase_ ) UpperCamelCase__ : Union[str, Any] = MBartaaTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowercase_ ) @require_torch def UpperCamelCase__ ( self ) -> Any: """simple docstring""" UpperCamelCase__ : Tuple = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowercase_, return_tensors='''pt''' ) UpperCamelCase__ : int = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" UpperCamelCase__ : Tuple = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowercase_, truncation=lowercase_, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', ) UpperCamelCase__ : Union[str, Any] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) self.assertIsInstance(lowercase_, lowercase_ ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) UpperCamelCase__ : int = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowercase_ ) self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are 
reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) def UpperCamelCase__ ( self ) -> Any: """simple docstring""" UpperCamelCase__ : str = self.tokenizer(self.src_text, padding=lowercase_, truncation=lowercase_, max_length=3, return_tensors='''pt''' ) UpperCamelCase__ : Any = self.tokenizer( text_target=self.tgt_text, padding=lowercase_, truncation=lowercase_, max_length=10, return_tensors='''pt''' ) UpperCamelCase__ : Any = targets['''input_ids'''] UpperCamelCase__ : Any = shift_tokens_right(lowercase_, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def UpperCamelCase__ ( self ) -> Any: """simple docstring""" UpperCamelCase__ : List[Any] = self.tokenizer._build_translation_inputs( '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowercase_ ), { # en_XX, A, test, EOS '''input_ids''': [[250004, 62, 3034, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250001, }, )
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[Any] , lowercase_ :int ) -> None: UpperCAmelCase = size UpperCAmelCase = [0] * size UpperCAmelCase = [0] * size @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return index | (index + 1) @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = value while index < self.size: UpperCAmelCase = self.get_prev(lowercase_ ) + 1 if current_left_border == index: UpperCAmelCase = value else: UpperCAmelCase = max(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = self.get_next(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int ) -> int: right -= 1 # Because of right is exclusive UpperCAmelCase = 0 while left <= right: UpperCAmelCase = self.get_prev(lowercase_ ) if left <= current_left: UpperCAmelCase = max(lowercase_ , self.tree[right] ) UpperCAmelCase = current_left else: UpperCAmelCase = max(lowercase_ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available _snake_case = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str: UpperCAmelCase = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } UpperCAmelCase = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): UpperCAmelCase = token_dict['token'] UpperCAmelCase = Tokenizer(Unigram() ) UpperCAmelCase = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) , ' ' ), normalizers.Lowercase(), ] ) UpperCAmelCase = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ), pre_tokenizers.Digits(individual_digits=lowercase_ ), pre_tokenizers.Punctuation(), ] ) UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ) UpperCAmelCase = TemplateProcessing( single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , ) UpperCAmelCase = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [files] self._tokenizer.train(lowercase_ , trainer=lowercase_ ) self.add_unk_id() def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ ) self.add_unk_id() def UpperCAmelCase__ ( self :Union[str, Any] ) -> int: UpperCAmelCase = json.loads(self._tokenizer.to_str() ) UpperCAmelCase = self.special_tokens['unk']['id'] UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
"""simple docstring""" def lowercase ( _snake_case : Optional[int] ) ->List[str]: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : List[Any] = len(bin(lowercase_ )[3:] ) __snake_case : List[str] = bin(abs(lowercase_ ) - (1 << binary_number_length) )[3:] __snake_case : int = ( ( '''1''' + '''0''' * (binary_number_length - len(lowercase_ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _lowerCAmelCase ( lowercase_ = 8 ): UpperCAmelCase = ascii_letters + digits + punctuation return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(lowercase_ ) UpperCAmelCase = i // 3 UpperCAmelCase = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) UpperCAmelCase = ( chars_incl + random(lowercase_ , quotient + remainder ) + random(lowercase_ , lowercase_ ) + random(lowercase_ , lowercase_ ) ) UpperCAmelCase = list(lowercase_ ) shuffle(lowercase_ ) return "".join(lowercase_ ) # random is a generalised function for letters, characters and numbers def _lowerCAmelCase ( lowercase_ , lowercase_ ): return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ = 8 ): if len(lowercase_ ) < min_length: # Your Password must be at least 8 characters long return False UpperCAmelCase = any(char in ascii_uppercase for char in password ) UpperCAmelCase = any(char in ascii_lowercase for char in password ) UpperCAmelCase = any(char in digits for char in password ) UpperCAmelCase = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def _lowerCAmelCase ( ): UpperCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() ) UpperCAmelCase = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(lowercase_ ) ) print( 'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Construct a MGP-STR character-level tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : """simple docstring""" def UpperCAmelCase__ ( self :Any ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self :List[Any] ) -> Any: torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, 
"scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self :List[str] ) -> str: UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = inputs['prompt'] UpperCAmelCase = inputs['generator'] UpperCAmelCase = inputs['num_inference_steps'] UpperCAmelCase = inputs['output_type'] if "image" in inputs: UpperCAmelCase = inputs['image'] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['mask_image'] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['original_image'] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ ) # inputs with prompt converted to embeddings UpperCAmelCase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = inputs['generator'] UpperCAmelCase = inputs['num_inference_steps'] UpperCAmelCase = inputs['output_type'] # inputs with prompt converted to embeddings UpperCAmelCase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**lowercase_ )[0] UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max() self.assertLess(lowercase_ , 1E-4 ) def UpperCAmelCase__ ( self :List[Any] ) -> str: UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = pipe_loaded(**lowercase_ )[0] UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) 
).max() self.assertLess(lowercase_ , 1E-4 )
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ): lowerCAmelCase = list(lowercase_ ) for i in range(len(lowercase_ ) ): lowerCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' lowerCAmelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can\'t allocate memory""", # CPU OOM ] if isinstance(lowercase_ , lowercase_ ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any = None , SCREAMING_SNAKE_CASE : Dict = 1_28 ): '''simple docstring''' if function is None: return functools.partial(lowercase_ , starting_batch_size=lowercase_ ) lowerCAmelCase = starting_batch_size def decorator(*SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() lowerCAmelCase = list(inspect.signature(lowercase_ ).parameters.keys() ) # Guard against user error if len(lowercase_ ) < (len(lowercase_ ) + 1): lowerCAmelCase = """, """.join([F'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( F'Batch size was passed into `{function.__name__}` as the first argument when called.' F'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowercase_ , *lowercase_ , **lowercase_ ) except Exception as e: if should_reduce_batch_size(lowercase_ ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case_ = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ): UpperCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _lowerCAmelCase ( lowercase_ , lowercase_=512 , lowercase_=512 ): UpperCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1 UpperCAmelCase = np.transpose(lowercase_ , [2, 0, 1] ) UpperCAmelCase = torch.from_numpy(lowercase_ ).unsqueeze(0 ) return image class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]: super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]: # get the original timestep using init_timestep UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ ) UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any: if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" ) UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ ) UpperCAmelCase = batch_size * num_images_per_prompt if image.shape[1] 
== 4: UpperCAmelCase = image else: if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ ) ] UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) else: UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ ) UpperCAmelCase = self.movq.config.scaling_factor * init_latents UpperCAmelCase = torch.cat([init_latents] , dim=0 ) UpperCAmelCase = init_latents.shape UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = init_latents return latents def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) UpperCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. 
UpperCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase__ ( self :List[Any] ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]: UpperCAmelCase = self._execution_device UpperCAmelCase = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase = image_embeds.shape[0] if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) if not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [image] if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 ) UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ ) UpperCAmelCase = self.movq.encode(lowercase_ )['latents'] UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) UpperCAmelCase = self.prepare_latents( lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase = {'image_embeds': image_embeds} UpperCAmelCase = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 ) UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 ) UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase = image * 0.5 + 0.5 UpperCAmelCase = image.clamp(0 , 1 ) UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowercase_ : Optional[Any] = (DDIMParallelScheduler,) lowercase_ : Dict = (("""eta""", 0.0), ("""num_inference_steps""", 50)) def lowerCamelCase_ ( self , **snake_case_ ): """simple docstring""" A_ : Dict = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**lowercase_ ) return config def lowerCamelCase_ ( self , **snake_case_ ): """simple docstring""" A_ : Dict = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(**lowercase_ ) A_ : Dict = scheduler_class(**lowercase_ ) A_ , A_ : int = 1_0, 0.0 A_ : Optional[int] = self.dummy_model() A_ : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for t in scheduler.timesteps: A_ : Any = model(lowercase_ , lowercase_ ) A_ : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def lowerCamelCase_ ( self ): """simple docstring""" for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) A_ : Tuple = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config(steps_offset=1 ) A_ : List[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def lowerCamelCase_ ( self ): """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" self.check_over_configs(thresholding=lowercase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , ) def lowerCamelCase_ ( self ): """simple docstring""" for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=lowercase_ , eta=lowercase_ ) def lowerCamelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.scheduler_classes[0] A_ : List[Any] = 
self.get_scheduler_config() A_ : List[str] = scheduler_class(**lowercase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1E-5 def lowerCamelCase_ ( self ): """simple docstring""" A_ : List[str] = self.scheduler_classes[0] A_ : List[Any] = self.get_scheduler_config() A_ : Dict = scheduler_class(**lowercase_ ) A_ , A_ : int = 1_0, 0.0 scheduler.set_timesteps(lowercase_ ) A_ : str = self.dummy_model() A_ : Dict = self.dummy_sample_deter A_ : Union[str, Any] = self.dummy_sample_deter + 0.1 A_ : List[str] = self.dummy_sample_deter - 0.1 A_ : Union[str, Any] = samplea.shape[0] A_ : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) A_ : Union[str, Any] = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ ) A_ : Optional[int] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A_ : Union[str, Any] = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ ) A_ : Any = torch.sum(torch.abs(lowercase_ ) ) A_ : Any = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def lowerCamelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.full_loop() A_ : int = torch.sum(torch.abs(lowercase_ ) ) A_ : Any = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def lowerCamelCase_ ( self ): """simple docstring""" A_ : str = self.full_loop(prediction_type='v_prediction' ) A_ : Tuple = torch.sum(torch.abs(lowercase_ ) ) A_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def lowerCamelCase_ ( self ): """simple docstring""" A_ : Dict = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) A_ : Dict = torch.sum(torch.abs(lowercase_ ) ) A_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def lowerCamelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) A_ : Dict = torch.sum(torch.abs(lowercase_ ) ) A_ : Optional[int] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 ) < 1E-3
286
"""simple docstring""" import colorsys from PIL import Image # type: ignore def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = x UpperCAmelCase = y for step in range(lowercase_ ): # noqa: B007 UpperCAmelCase = a * a - b * b + x UpperCAmelCase = 2 * a * b + y UpperCAmelCase = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowerCAmelCase ( lowercase_ ): if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def _lowerCAmelCase ( lowercase_ ): if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) ) def _lowerCAmelCase ( lowercase_ = 800 , lowercase_ = 600 , lowercase_ = -0.6 , lowercase_ = 0 , lowercase_ = 3.2 , lowercase_ = 50 , lowercase_ = True , ): UpperCAmelCase = Image.new('RGB' , (image_width, image_height) ) UpperCAmelCase = img.load() # loop through the image-coordinates for image_x in range(lowercase_ ): for image_y in range(lowercase_ ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase = figure_width / image_width * image_height UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase = get_distance(lowercase_ , lowercase_ , lowercase_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase = get_color_coded_rgb(lowercase_ ) else: UpperCAmelCase = get_black_and_white_rgb(lowercase_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure snake_case_ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A: Optional[int] = { "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: int = [ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: Dict = [ "FlaxLongT5ForConditionalGeneration", "FlaxLongT5Model", "FlaxLongT5PreTrainedModel", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
109
"""simple docstring""" import requests snake_case_ = """""" # <-- Put your OpenWeatherMap appid here! snake_case_ = """https://api.openweathermap.org/data/2.5/""" def _lowerCAmelCase ( lowercase_ = "Chicago" , lowercase_ = APPID ): return requests.get(URL_BASE + 'weather' , params=locals() ).json() def _lowerCAmelCase ( lowercase_ = "Kolkata, India" , lowercase_ = APPID ): return requests.get(URL_BASE + 'forecast' , params=locals() ).json() def _lowerCAmelCase ( lowercase_ = 5_5.6_8 , lowercase_ = 1_2.5_7 , lowercase_ = APPID ): return requests.get(URL_BASE + 'onecall' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: snake_case_ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
78
0
'''simple docstring''' from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( 'pipelines_utils', '0.22.0', 'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.', standard_warn=False, stacklevel=3, )
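The shim above keeps the old import path working (with a warning) until 0.22.0; per the deprecation message itself, new code should import from the pipelines subpackage:

# Deprecated (still works, but warns):
#     from diffusers.pipeline_utils import DiffusionPipeline
# Preferred:
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput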
346
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = ["""image_processor""", """tokenizer"""] __UpperCamelCase = """LayoutLMv2ImageProcessor""" __UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowercase_ , ) UpperCAmelCase = kwargs.pop('feature_extractor' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowercase_ , lowercase_ ) def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCAmelCase = features['words'] UpperCAmelCase = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # add pixel values UpperCAmelCase = features.pop('pixel_values' ) if return_overflowing_tokens is True: UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] ) UpperCAmelCase = images return encoded_inputs def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image UpperCAmelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f""" {len(lowercase_ )} and {len(lowercase_ )}""" ) return images_with_overflow def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple: return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]: return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def UpperCAmelCase__ ( self :int ) -> Optional[int]: return ["input_ids", "bbox", "attention_mask", "image"] @property def UpperCAmelCase__ ( self :int ) -> Dict: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , ) return self.image_processor_class @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , ) return self.image_processor
78
0
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum of a contiguous subarray of `arr`."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
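The allow_empty_subarrays flag only changes the answer when every element is negative, where the empty subarray (sum 0) beats any non-empty one; a short sketch against the restored function above:

assert max_subarray_sum([-2, -1]) == -1  # best non-empty subarray
assert max_subarray_sum([-2, -1], allow_empty_subarrays=True) == 0
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # classic Kadane answer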
35
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class A_ : """simple docstring""" def __init__( self :Union[str, Any] ) -> str: UpperCAmelCase = {} def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict=1 ) -> List[Any]: if self.graph.get(lowercase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: UpperCAmelCase = [[w, v]] if not self.graph.get(lowercase_ ): UpperCAmelCase = [] def UpperCAmelCase__ ( self :Any ) -> Optional[int]: return list(self.graph ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[Any] ) -> Dict: if self.graph.get(lowercase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :Tuple=-2 , lowercase_ :List[Any]=-1 ) -> List[Any]: if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return visited def UpperCAmelCase__ ( self :List[str] , lowercase_ :int=-1 ) -> Tuple: if c == -1: UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(lowercase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase_ , lowercase_ , 1 ) def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any]=-2 ) -> Optional[int]: UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(lowercase_ ) visited.append(lowercase_ ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[int] ) -> List[Any]: UpperCAmelCase = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> List[str]: return len(self.graph[u] ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any=-2 ) -> int: UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return sorted_nodes def UpperCAmelCase__ ( self :str ) -> str: UpperCAmelCase = [] UpperCAmelCase = [] 
UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return list(lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return False def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int=-2 , lowercase_ :List[str]=-1 ) -> Any: UpperCAmelCase = time() self.dfs(lowercase_ , lowercase_ ) UpperCAmelCase = time() return end - begin def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[str]=-2 ) -> str: UpperCAmelCase = time() self.bfs(lowercase_ ) UpperCAmelCase = time() return end - begin class A_ : """simple docstring""" def __init__( self :List[str] ) -> Union[str, Any]: UpperCAmelCase = {} def UpperCAmelCase__ ( self :str , lowercase_ :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int]=1 ) -> Dict: # check if the u exists if self.graph.get(lowercase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist UpperCAmelCase = [[w, v]] # add the other way if self.graph.get(lowercase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist UpperCAmelCase = [[w, u]] def UpperCAmelCase__ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :Tuple ) -> Optional[Any]: if self.graph.get(lowercase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase_ ) # the other way round if self.graph.get(lowercase_ 
): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int]=-2 , lowercase_ :Optional[int]=-1 ) -> List[str]: if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return visited def UpperCAmelCase__ ( self :List[str] , lowercase_ :Optional[int]=-1 ) -> Any: if c == -1: UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(lowercase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase_ , lowercase_ , 1 ) def UpperCAmelCase__ ( self :Dict , lowercase_ :int=-2 ) -> int: UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(lowercase_ ) visited.append(lowercase_ ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[Any] ) -> str: return len(self.graph[u] ) def UpperCAmelCase__ ( self :Optional[Any] ) -> Any: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return list(lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> str: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = 
len(lowercase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return False def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]: return list(self.graph ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Union[str, Any]=-2 , lowercase_ :List[str]=-1 ) -> str: UpperCAmelCase = time() self.dfs(lowercase_ , lowercase_ ) UpperCAmelCase = time() return end - begin def UpperCAmelCase__ ( self :Any , lowercase_ :int=-2 ) -> str: UpperCAmelCase = time() self.bfs(lowercase_ ) UpperCAmelCase = time() return end - begin
78
0
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Any: _lowerCAmelCase : Optional[int] = abs(lowercase_ ) _lowerCAmelCase : Any = 0 while n > 0: res += n % 10 n //= 10 return res def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Union[str, Any]: _lowerCAmelCase : Optional[int] = abs(lowercase_ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Optional[int]: return sum(int(lowercase_ ) for c in str(abs(lowercase_ ) ) ) def SCREAMING_SNAKE_CASE ( ) -> Tuple: from collections.abc import Callable from timeit import timeit def benchmark_a_function(_lowerCamelCase : Dict ,_lowerCamelCase : int ) -> None: _lowerCAmelCase : List[Any] = f"{func.__name__}({value})" _lowerCAmelCase : Optional[int] = timeit(f"__main__.{call}" ,setup="""import __main__""" ) print(f"{call:56} = {func(lowercase_ )} -- {timing:.4f} seconds" ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(lowercase_ ,lowercase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
44
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
78
0
from manim import * class UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: '''simple docstring''' A__ = Rectangle(height=0.5 , width=0.5) A__ = Rectangle(height=0.46 , width=0.46).set_stroke(width=0) A__ = Rectangle(height=0.25 , width=0.25) A__ = [mem.copy() for i in range(6)] A__ = [mem.copy() for i in range(6)] A__ = VGroup(*lowercase_).arrange(lowercase_ , buff=0) A__ = VGroup(*lowercase_).arrange(lowercase_ , buff=0) A__ = VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0) A__ = Text('''CPU''' , font_size=24) A__ = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_) cpu.move_to([-2.5, -0.5, 0]) self.add(lowercase_) A__ = [mem.copy() for i in range(4)] A__ = VGroup(*lowercase_).arrange(lowercase_ , buff=0) A__ = Text('''GPU''' , font_size=24) A__ = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_) gpu.move_to([-1, -1, 0]) self.add(lowercase_) A__ = [mem.copy() for i in range(6)] A__ = VGroup(*lowercase_).arrange(lowercase_ , buff=0) A__ = Text('''Model''' , font_size=24) A__ = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_) model.move_to([3, -1.0, 0]) self.add(lowercase_) A__ = [] A__ = [] for i, rect in enumerate(lowercase_): A__ = fill.copy().set_fill(lowercase_ , opacity=0.8) target.move_to(lowercase_) model_arr.append(lowercase_) A__ = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.8) cpu_target.move_to(cpu_left_col_base[i]) model_cpu_arr.append(lowercase_) self.add(*lowercase_ , *lowercase_) A__ = [meta_mem.copy() for i in range(6)] A__ = [meta_mem.copy() for i in range(6)] A__ = VGroup(*lowercase_).arrange(lowercase_ , buff=0) A__ = VGroup(*lowercase_).arrange(lowercase_ , buff=0) A__ = VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0) A__ = Text('''Disk''' , font_size=24) A__ = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_) disk.move_to([-4, -1.25, 0]) self.add(lowercase_ , lowercase_) A__ = Square(side_length=2.2) key.move_to([-5, 2, 0]) A__ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0]) self.add(lowercase_ , lowercase_) A__ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left()) self.add(lowercase_) A__ = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0]) self.play(Write(lowercase_)) A__ = Square(0.3) input.set_fill(lowercase_ , opacity=1.0) input.set_stroke(width=0.0) input.next_to(model_base[0] , lowercase_ , buff=0.5) self.play(Write(lowercase_)) input.generate_target() input.target.next_to(model_arr[0] , direction=lowercase_ , buff=0.02) self.play(MoveToTarget(lowercase_)) self.play(FadeOut(lowercase_)) A__ = Arrow(start=lowercase_ , end=lowercase_ , color=lowercase_ , buff=0.5) a.next_to(model_arr[0].get_left() , lowercase_ , buff=0.2) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0]) A__ = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0]) self.play(Write(lowercase_ , run_time=3)) A__ = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, 
'''buff''': 0.02} self.play( Write(lowercase_) , Circumscribe(model_arr[0] , color=lowercase_ , **lowercase_) , Circumscribe(model_cpu_arr[0] , color=lowercase_ , **lowercase_) , Circumscribe(gpu_rect[0] , color=lowercase_ , **lowercase_) , ) self.play(MoveToTarget(model_cpu_arr[0])) A__ = a.copy() for i in range(6): a_c.next_to(model_arr[i].get_right() + 0.02 , lowercase_ , buff=0.2) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02) A__ = AnimationGroup( FadeOut(lowercase_ , run_time=0.5) , MoveToTarget(lowercase_ , run_time=0.5) , FadeIn(lowercase_ , run_time=0.5) , lag_ratio=0.2) self.play(lowercase_) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i]) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0]) if i >= 1: A__ = 0.7 self.play( Circumscribe(model_arr[i] , **lowercase_) , Circumscribe(cpu_left_col_base[i] , **lowercase_) , Circumscribe(cpu_left_col_base[i + 1] , color=lowercase_ , **lowercase_) , Circumscribe(gpu_rect[0] , color=lowercase_ , **lowercase_) , Circumscribe(model_arr[i + 1] , color=lowercase_ , **lowercase_) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i]) , MoveToTarget(model_cpu_arr[i + 1]) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1]) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2) self.play( Circumscribe(model_arr[-1] , color=lowercase_ , **lowercase_) , Circumscribe(cpu_left_col_base[-1] , color=lowercase_ , **lowercase_) , Circumscribe(gpu_rect[0] , color=lowercase_ , **lowercase_) , ) self.play(MoveToTarget(model_cpu_arr[i])) A__ = a_c A__ = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5) self.play( FadeOut(lowercase_) , FadeOut(lowercase_ , run_time=0.5) , ) A__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24) step_a.move_to([2, 2, 0]) self.play(Write(lowercase_ , run_time=3) , MoveToTarget(lowercase_)) self.wait()
14
"""simple docstring""" def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ): UpperCAmelCase = [] UpperCAmelCase = 0 for index, char in enumerate(lowercase_ ): if char == separator: split_words.append(string[last_index:index] ) UpperCAmelCase = index + 1 elif index + 1 == len(lowercase_ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
78
0
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral
    (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
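Small cases pin the ring recurrence down: the 3x3 spiral's diagonals are 1 + 3 + 5 + 7 + 9 = 25, and Project Euler 28 quotes 101 for the 5x5 spiral.

assert solution(3) == 25
assert solution(5) == 101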
201
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) snake_case_ = logging.getLogger(__name__) def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = git.Repo(search_parent_directories=lowercase_ ) UpperCAmelCase = { 'repo_id': str(lowercase_ ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def _lowerCAmelCase ( lowercase_ ): if params.n_gpu <= 0: UpperCAmelCase = 0 UpperCAmelCase = -1 UpperCAmelCase = True UpperCAmelCase = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 UpperCAmelCase = int(os.environ['WORLD_SIZE'] ) UpperCAmelCase = int(os.environ['N_GPU_NODE'] ) UpperCAmelCase = int(os.environ['RANK'] ) # number of nodes / node ID UpperCAmelCase = params.world_size // params.n_gpu_per_node UpperCAmelCase = params.global_rank // params.n_gpu_per_node UpperCAmelCase = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 1 UpperCAmelCase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode UpperCAmelCase = params.node_id == 0 and params.local_rank == 0 UpperCAmelCase = params.n_nodes > 1 # summary UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' , backend='nccl' , ) def _lowerCAmelCase ( lowercase_ ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
78
0
"""simple docstring""" import os from datetime import datetime as dt from github import Github _snake_case = [ 'good first issue', 'feature request', 'wip', ] def lowerCAmelCase__ ( ): '''simple docstring''' _a : List[str] = Github(os.environ["""GITHUB_TOKEN"""] ) _a : Any = g.get_repo("""huggingface/accelerate""" ) _a : Any = repo.get_issues(state="""open""" ) for issue in open_issues: _a : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCamelCase__ : i.created_at , reverse=lowercase_ ) _a : Union[str, Any] = comments[0] if len(lowercase_ ) > 0 else None _a : Optional[Any] = dt.utcnow() _a : List[str] = (current_time - issue.updated_at).days _a : List[Any] = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state="""closed""" ) elif ( days_since_updated > 2_3 and days_since_creation >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
294
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort snake_case_ = """1""" snake_case_ = """0""" snake_case_ = """1""" snake_case_ = ort.SessionOptions() snake_case_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print("""Create inference session...""") snake_case_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""] snake_case_ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider) snake_case_ = ort.RunOptions() snake_case_ = 128 snake_case_ = 1 snake_case_ = np.ones((batch, sequence), dtype=np.intaa) snake_case_ = np.ones((batch, sequence), dtype=np.intaa) snake_case_ = np.ones((batch, sequence), dtype=np.intaa) print("""Warm up phase...""") sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Start inference...""") snake_case_ = time.time() snake_case_ = 2000 snake_case_ = {} for iter in range(max_iters): snake_case_ = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
78
0
"""simple docstring""" import argparse import importlib from pathlib import Path # Test all the extensions added in the setup SCREAMING_SNAKE_CASE : Optional[int] = [ """kernels/rwkv/wkv_cuda.cu""", """kernels/rwkv/wkv_op.cpp""", """kernels/deformable_detr/ms_deform_attn.h""", """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""", """models/graphormer/algos_graphormer.pyx""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""") SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() if args.check_lib: SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module("""transformers""") SCREAMING_SNAKE_CASE : Union[str, Any] = Path(transformers_module.__file__).parent else: SCREAMING_SNAKE_CASE : List[Any] = Path.cwd() / """build/lib/transformers""" if not test_custom_files_are_present(transformers_path): raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
102
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case_ = logging.get_logger(__name__) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = ["""pixel_values"""] def __init__( self :int , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :bool = True , lowercase_ :Union[int, float] = 1 / 2_55 , lowercase_ :bool = True , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :bool = True , **lowercase_ :Union[str, Any] , ) -> None: super().__init__(**lowercase_ ) UpperCAmelCase = size if size is not None else {'height': 3_84, 'width': 3_84} UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase = do_convert_rgb def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Any , ) -> np.ndarray: UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) UpperCAmelCase = (size['height'], size['width']) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :np.ndarray , lowercase_ :Union[int, float] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[int] , ) -> int: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :Union[float, List[float]] , lowercase_ :Union[float, List[float]] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[Any] , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :ImageInput , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Dict[str, int]] = None , lowercase_ :PILImageResampling = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[float] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[str, TensorType]] = None , lowercase_ :bool = None , lowercase_ :ChannelDimension = ChannelDimension.FIRST , **lowercase_ :Tuple , ) -> PIL.Image.Image: UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ ) UpperCAmelCase = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. 
UpperCAmelCase = [to_numpy_array(lowercase_ ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] UpperCAmelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] UpperCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase_ ) return encoded_outputs
78
0
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ): _a = StableUnCLIPPipeline _a = TEXT_TO_IMAGE_PARAMS _a = TEXT_TO_IMAGE_BATCH_PARAMS _a = TEXT_TO_IMAGE_IMAGE_PARAMS _a = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _a = False def UpperCAmelCase__ ( self : List[Any]): lowerCAmelCase_ : str = 3_2 lowerCAmelCase_ : Optional[Any] = embedder_hidden_size # prior components torch.manual_seed(0) lowerCAmelCase_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') torch.manual_seed(0) lowerCAmelCase_ : Optional[Any] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )) torch.manual_seed(0) lowerCAmelCase_ : List[Any] = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowercase_ , num_layers=1 , ) torch.manual_seed(0) lowerCAmelCase_ : Dict = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0) lowerCAmelCase_ : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_) lowerCAmelCase_ : Dict = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''') torch.manual_seed(0) lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') torch.manual_seed(0) lowerCAmelCase_ : Optional[int] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )) torch.manual_seed(0) lowerCAmelCase_ : Optional[Any] = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , ) torch.manual_seed(0) lowerCAmelCase_ : Dict = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase_ , steps_offset=1 , ) torch.manual_seed(0) 
lowerCAmelCase_ : Any = AutoencoderKL() lowerCAmelCase_ : List[str] = { # prior components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def UpperCAmelCase__ ( self : Union[str, Any] , A_ : str , A_ : Dict=0): if str(lowercase_).startswith('''mps'''): lowerCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_) else: lowerCAmelCase_ : Optional[Any] = torch.Generator(device=lowercase_).manual_seed(lowercase_) lowerCAmelCase_ : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self : Optional[Any]): lowerCAmelCase_ : Optional[Any] = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=lowercase_) def UpperCAmelCase__ ( self : Union[str, Any]): lowerCAmelCase_ : List[str] = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowercase_) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self : Any): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Tuple): lowerCAmelCase_ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''') lowerCAmelCase_ : Tuple = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa) pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase_ : Tuple = torch.Generator(device='''cpu''').manual_seed(0) lowerCAmelCase_ : List[str] = pipe('''anime turle''' , generator=lowercase_ , output_type='''np''') lowerCAmelCase_ : Tuple = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(lowercase_ , lowercase_) def UpperCAmelCase__ ( self : Tuple): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase_ : Dict = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa) lowerCAmelCase_ : Union[str, Any] = pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase_ : List[Any] = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) lowerCAmelCase_ : Dict = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
103
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """beit""" def __init__( self :List[str] , lowercase_ :List[Any]=81_92 , lowercase_ :str=7_68 , lowercase_ :List[str]=12 , lowercase_ :Optional[int]=12 , lowercase_ :Dict=30_72 , lowercase_ :Tuple="gelu" , lowercase_ :Any=0.0 , lowercase_ :Optional[int]=0.0 , lowercase_ :Dict=0.02 , lowercase_ :int=1E-12 , lowercase_ :List[Any]=2_24 , lowercase_ :Dict=16 , lowercase_ :List[Any]=3 , lowercase_ :List[str]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :str=True , lowercase_ :List[str]=[3, 5, 7, 11] , lowercase_ :Optional[int]=[1, 2, 3, 6] , lowercase_ :str=True , lowercase_ :int=0.4 , lowercase_ :Union[str, Any]=2_56 , lowercase_ :int=1 , lowercase_ :Tuple=False , lowercase_ :Optional[int]=2_55 , **lowercase_ :str , ) -> Any: super().__init__(**lowercase_ ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = use_mask_token UpperCAmelCase = use_absolute_position_embeddings UpperCAmelCase = use_relative_position_bias UpperCAmelCase = use_shared_relative_position_bias UpperCAmelCase = layer_scale_init_value UpperCAmelCase = drop_path_rate UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase = out_indices UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase = use_auxiliary_head UpperCAmelCase = auxiliary_loss_weight UpperCAmelCase = auxiliary_channels UpperCAmelCase = auxiliary_num_convs UpperCAmelCase = auxiliary_concat_input UpperCAmelCase = semantic_loss_ignore_index class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = version.parse("""1.11""" ) @property def UpperCAmelCase__ ( self :Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCAmelCase__ ( self :Tuple ) -> float: return 1E-4
78
0
"""simple docstring""" def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
46
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available snake_case_ = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
0
"""simple docstring""" def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" if b == 0: return 1 if (b % 2) == 0: return actual_power(lowercase_ , int(b / 2 ) ) * actual_power(lowercase_ , int(b / 2 ) ) else: return a * actual_power(lowercase_ , int(b / 2 ) ) * actual_power(lowercase_ , int(b / 2 ) ) def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" if b < 0: return 1 / actual_power(lowercase_ , lowercase_ ) return actual_power(lowercase_ , lowercase_ ) if __name__ == "__main__": print(power(-2, -3))
286
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = fname.split(os.path.sep )[-1] return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0] class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :List[str]=None , lowercase_ :Optional[Any]=None ) -> Optional[int]: UpperCAmelCase = file_names UpperCAmelCase = image_transform UpperCAmelCase = label_to_id def __len__( self :Optional[int] ) -> Optional[Any]: return len(self.file_names ) def __getitem__( self :int , lowercase_ :str ) -> List[str]: UpperCAmelCase = self.file_names[idx] UpperCAmelCase = PIL.Image.open(lowercase_ ) UpperCAmelCase = raw_image.convert('RGB' ) if self.image_transform is not None: UpperCAmelCase = self.image_transform(lowercase_ ) UpperCAmelCase = extract_label(lowercase_ ) if self.label_to_id is not None: UpperCAmelCase = self.label_to_id[label] return {"image": image, "label": label} def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Initialize accelerator if args.with_tracking: UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase = config['lr'] UpperCAmelCase = int(config['num_epochs'] ) UpperCAmelCase = int(config['seed'] ) UpperCAmelCase = int(config['batch_size'] ) UpperCAmelCase = config['image_size'] if not isinstance(lowercase_ , (list, tuple) ): UpperCAmelCase = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": UpperCAmelCase = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): UpperCAmelCase = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: UpperCAmelCase = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0] accelerator.init_trackers(lowercase_ , lowercase_ ) # Grab all the image filenames UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names] UpperCAmelCase = list(set(lowercase_ ) ) id_to_label.sort() UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )} # Set the seed before splitting the data. 
np.random.seed(lowercase_ ) torch.manual_seed(lowercase_ ) torch.cuda.manual_seed_all(lowercase_ ) # Split our filenames between train and validation UpperCAmelCase = np.random.permutation(len(lowercase_ ) ) UpperCAmelCase = int(0.8 * len(lowercase_ ) ) UpperCAmelCase = random_perm[:cut] UpperCAmelCase = random_perm[cut:] # For training we use a simple RandomResizedCrop UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] ) UpperCAmelCase = PetsDataset( [file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ ) # For evaluation, we use a deterministic Resize UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] ) UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ ) # Instantiate dataloaders. UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): UpperCAmelCase = False for param in model.get_classifier().parameters(): UpperCAmelCase = True # We normalize the batches of images to be a bit faster. UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase = 0 # We also need to keep track of the starting epoch so files are named properly UpperCAmelCase = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` UpperCAmelCase = os.path.splitext(lowercase_ )[0] if "epoch" in training_difference: UpperCAmelCase = int(training_difference.replace('epoch_' , '' ) ) + 1 UpperCAmelCase = None else: UpperCAmelCase = int(training_difference.replace('step_' , '' ) ) UpperCAmelCase = resume_step // len(lowercase_ ) resume_step -= starting_epoch * len(lowercase_ ) # Now we train the model for epoch in range(lowercase_ , lowercase_ ): model.train() if args.with_tracking: UpperCAmelCase = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step UpperCAmelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader UpperCAmelCase = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch['image'] - mean) / std UpperCAmelCase = model(lowercase_ ) UpperCAmelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , lowercase_ ) accelerator.save_state(lowercase_ ) model.eval() UpperCAmelCase = 0 UpperCAmelCase = 0 for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch['image'] - mean) / std with torch.no_grad(): UpperCAmelCase = model(lowercase_ ) UpperCAmelCase = outputs.argmax(dim=-1 ) UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['label']) ) UpperCAmelCase = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() UpperCAmelCase = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { 'accuracy': 100 * eval_metric, 'train_loss': total_loss.item() / len(lowercase_ ), 'epoch': epoch, } , step=lowercase_ , ) if checkpointing_steps == "epoch": UpperCAmelCase = F"""epoch_{epoch}""" if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , lowercase_ ) accelerator.save_state(lowercase_ ) if args.with_tracking: accelerator.end_training() def _lowerCAmelCase ( ): UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=lowercase_ , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=lowercase_ , default=lowercase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=lowercase_ , default=lowercase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=lowercase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=lowercase_ , default=lowercase_ , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=lowercase_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
78
0
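# A plausible direct invocation of the training script above, bypassing its CLI.
# The Namespace fields mirror exactly the argparse flags defined in main(); the
# data_dir value is a hypothetical folder of "<class>_<i>.jpg" images.
import argparse

args = argparse.Namespace(
    data_dir="images/",            # hypothetical path, not from the original
    mixed_precision=None,
    cpu=True,
    checkpointing_steps=None,
    output_dir=".",
    resume_from_checkpoint=None,
    with_tracking=False,
    project_dir="logs",
)
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
# training_function(config, args)  # the same call main() makes above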
"""simple docstring""" from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers A: Any = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _snake_case ( UpperCamelCase : List[str] , UpperCamelCase : str=None ): require_version(deps[pkg] , lowercase_ )
109
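# External usage sketch: the same require_version helper can guard a script's
# dependencies at startup. The requirement string and hint are illustrative.
from transformers.utils.versions import require_version

require_version("datasets>=1.8.0", "To fix: pip install -U datasets")  # raises if unmet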
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = list(range(len(lowercase_ ) ) ) UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )] index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ ) UpperCAmelCase = 0 UpperCAmelCase = [0] * len(lowercase_ ) for i in index: if weight[i] <= capacity: UpperCAmelCase = 1 max_value += value[i] capacity -= weight[i] else: UpperCAmelCase = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
78
0
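# Worked example for the fractional knapsack above: ratios are 6, 5 and 4, so
# the greedy pass takes items 0 and 1 whole and 2/3 of item 2.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, capacity=50)
print(max_value)   # 240.0
print(fractions)   # [1, 1, 0.666...]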
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
346
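# With the lazy structure above in place, the public names resolve on first use:
from transformers import SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel

# FlaxSpeechEncoderDecoderModel is exposed the same way when JAX/Flax is installed.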
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING snake_case_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Any , *lowercase_ :str , **lowercase_ :List[Any] ) -> Union[str, Any]: super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any=None , lowercase_ :Optional[int]=None , lowercase_ :Tuple=None , **lowercase_ :Tuple ) -> Dict: UpperCAmelCase , UpperCAmelCase = {}, {} if padding is not None: UpperCAmelCase = padding if truncation is not None: UpperCAmelCase = truncation if top_k is not None: UpperCAmelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self :List[Any] , lowercase_ :Union["Image.Image", str] , lowercase_ :str = None , **lowercase_ :Union[str, Any] ) -> Union[str, Any]: if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = {'image': image, 'question': question} else: UpperCAmelCase = image UpperCAmelCase = super().__call__(lowercase_ , **lowercase_ ) return results def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :int=False , lowercase_ :Optional[int]=False ) -> Union[str, Any]: UpperCAmelCase = load_image(inputs['image'] ) UpperCAmelCase = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> Any: UpperCAmelCase = self.model(**lowercase_ ) return model_outputs def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple , lowercase_ :List[Any]=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: UpperCAmelCase = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase = model_outputs.logits.sigmoid()[0] UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase_ ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase = scores.tolist() UpperCAmelCase = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
78
0
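# Usage sketch for the visual-question-answering pipeline defined above.
# "cat.png" is a placeholder path and the model id is one public VQA checkpoint;
# both are assumptions, any compatible checkpoint works.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(image="cat.png", question="What animal is this?", top_k=2)
# -> [{"score": ..., "answer": ...}, ...]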
def check_bipartite_dfs(graph):
    """Return True if the undirected graph (adjacency list) is 2-colorable."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
35
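# Counter-example for the bipartiteness check above: a triangle (odd cycle)
# cannot be 2-colored, so the function returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False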
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """transfo-xl""" __UpperCamelCase = ["""mems"""] __UpperCamelCase = { """n_token""": """vocab_size""", """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self :List[Any] , lowercase_ :Optional[int]=26_77_35 , lowercase_ :Union[str, Any]=[2_00_00, 4_00_00, 20_00_00] , lowercase_ :List[Any]=10_24 , lowercase_ :Optional[Any]=10_24 , lowercase_ :Tuple=16 , lowercase_ :Tuple=64 , lowercase_ :Any=40_96 , lowercase_ :int=4 , lowercase_ :List[str]=False , lowercase_ :Union[str, Any]=18 , lowercase_ :Optional[Any]=16_00 , lowercase_ :Dict=10_00 , lowercase_ :Optional[int]=True , lowercase_ :Tuple=True , lowercase_ :Dict=0 , lowercase_ :Tuple=-1 , lowercase_ :Optional[int]=True , lowercase_ :Optional[int]=0.1 , lowercase_ :str=0.0 , lowercase_ :List[str]=True , lowercase_ :int="normal" , lowercase_ :Dict=0.01 , lowercase_ :Optional[Any]=0.01 , lowercase_ :Dict=0.02 , lowercase_ :Tuple=1E-5 , lowercase_ :str=0 , **lowercase_ :Tuple , ) -> List[str]: UpperCAmelCase = vocab_size UpperCAmelCase = [] self.cutoffs.extend(lowercase_ ) if proj_share_all_but_first: UpperCAmelCase = [False] + [True] * len(self.cutoffs ) else: UpperCAmelCase = [False] + [False] * len(self.cutoffs ) UpperCAmelCase = d_model UpperCAmelCase = d_embed UpperCAmelCase = d_head UpperCAmelCase = d_inner UpperCAmelCase = div_val UpperCAmelCase = pre_lnorm UpperCAmelCase = n_layer UpperCAmelCase = n_head UpperCAmelCase = mem_len UpperCAmelCase = same_length UpperCAmelCase = attn_type UpperCAmelCase = clamp_len UpperCAmelCase = sample_softmax UpperCAmelCase = adaptive UpperCAmelCase = dropout UpperCAmelCase = dropatt UpperCAmelCase = untie_r UpperCAmelCase = init UpperCAmelCase = init_range UpperCAmelCase = proj_init_std UpperCAmelCase = init_std UpperCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowercase_ , **lowercase_ ) @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any: # Message copied from Transformer-XL documentation logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any ) -> Tuple: # Message copied from Transformer-XL documentation raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
78
0
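# Constructing the configuration above and overriding a few fields; the kwarg
# names come straight from the __init__ signature.
from transformers import TransfoXLConfig

config = TransfoXLConfig(d_model=512, n_layer=6, mem_len=800)
print(config.model_type)  # "transfo-xl"
print(config.tie_projs)   # [False, True, True, True] with the default cutoffs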
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __A ( unittest.TestCase ): _UpperCamelCase : int = MODEL_FOR_CAUSAL_LM_MAPPING _UpperCamelCase : int = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __A ( self ): _lowerCAmelCase : Tuple = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" ) # Using `do_sample=False` to force deterministic output _lowerCAmelCase : List[Any] = text_generator("""This is a test""" , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.""" """ oscope. FiliFili@@""" ) } ] , ) _lowerCAmelCase : str = text_generator(["""This is a test""", """This is a second test"""] ) self.assertEqual( lowercase_ , [ [ { """generated_text""": ( """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.""" """ oscope. FiliFili@@""" ) } ], [ { """generated_text""": ( """This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy""" """ oscope. oscope. FiliFili@@""" ) } ], ] , ) _lowerCAmelCase : Optional[int] = text_generator("""This is a test""" , do_sample=lowercase_ , num_return_sequences=2 , return_tensors=lowercase_ ) self.assertEqual( lowercase_ , [ {"""generated_token_ids""": ANY(lowercase_ )}, {"""generated_token_ids""": ANY(lowercase_ )}, ] , ) _lowerCAmelCase : List[Any] = text_generator.model.config.eos_token_id _lowerCAmelCase : Optional[int] = """<pad>""" _lowerCAmelCase : str = text_generator( ["""This is a test""", """This is a second test"""] , do_sample=lowercase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase_ , ) self.assertEqual( lowercase_ , [ [ {"""generated_token_ids""": ANY(lowercase_ )}, {"""generated_token_ids""": ANY(lowercase_ )}, ], [ {"""generated_token_ids""": ANY(lowercase_ )}, {"""generated_token_ids""": ANY(lowercase_ )}, ], ] , ) @require_tf def __A ( self ): _lowerCAmelCase : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" ) # Using `do_sample=False` to force deterministic output _lowerCAmelCase : Optional[Any] = text_generator("""This is a test""" , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵""" """ please,""" ) } ] , ) _lowerCAmelCase : List[str] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ [ { """generated_text""": ( """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵""" """ please,""" ) } ], [ { """generated_text""": ( """This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes""" """ Cannes 閲閲Cannes Cannes Cannes 攵 please,""" ) } ], ] , ) def __A ( self , a__ , a__ , a__ ): _lowerCAmelCase : Tuple = TextGenerationPipeline(model=lowercase_ , tokenizer=lowercase_ ) return text_generator, ["This is a test", "Another test"] def __A ( self ): _lowerCAmelCase : Optional[int] = """Hello I believe in""" 
_lowerCAmelCase : int = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase : Union[str, Any] = text_generator(lowercase_ ) self.assertEqual( lowercase_ , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , ) _lowerCAmelCase : List[Any] = text_generator(lowercase_ , stop_sequence=""" fe""" ) self.assertEqual(lowercase_ , [{"""generated_text""": """Hello I believe in fe"""}] ) def __A ( self , a__ , a__ ): _lowerCAmelCase : List[str] = text_generator.model _lowerCAmelCase : Union[str, Any] = text_generator.tokenizer _lowerCAmelCase : Tuple = text_generator("""This is a test""" ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) ) _lowerCAmelCase : Optional[Any] = text_generator("""This is a test""" , return_full_text=lowercase_ ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] ) _lowerCAmelCase : Dict = pipeline(task="""text-generation""" , model=lowercase_ , tokenizer=lowercase_ , return_full_text=lowercase_ ) _lowerCAmelCase : Dict = text_generator("""This is a test""" ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] ) _lowerCAmelCase : List[str] = text_generator("""This is a test""" , return_full_text=lowercase_ ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) ) _lowerCAmelCase : Dict = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: _lowerCAmelCase : List[Any] = text_generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], ] , ) with self.assertRaises(lowercase_ ): _lowerCAmelCase : List[str] = text_generator("""test""" , return_full_text=lowercase_ , return_text=lowercase_ ) with self.assertRaises(lowercase_ ): _lowerCAmelCase : Dict = text_generator("""test""" , return_full_text=lowercase_ , return_tensors=lowercase_ ) with self.assertRaises(lowercase_ ): _lowerCAmelCase : List[Any] = text_generator("""test""" , return_text=lowercase_ , return_tensors=lowercase_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): _lowerCAmelCase : Optional[int] = text_generator("""""" ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): _lowerCAmelCase : int = text_generator("""""" ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. _lowerCAmelCase : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator("""This is a test""" * 500 , max_new_tokens=20 ) _lowerCAmelCase : Any = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(lowercase_ ): text_generator( """This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __A ( self ): import torch # Classic `model_kwargs` _lowerCAmelCase : Optional[int] = pipeline( model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) _lowerCAmelCase : int = pipe("""This is a test""" ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
_lowerCAmelCase : List[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) _lowerCAmelCase : Optional[Any] = pipe("""This is a test""" ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 _lowerCAmelCase : Union[str, Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) _lowerCAmelCase : Optional[Any] = pipe("""This is a test""" ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) @require_torch @require_torch_gpu def __A ( self ): import torch _lowerCAmelCase : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa ) pipe("""This is a test""" ) @require_torch @require_accelerate @require_torch_gpu def __A ( self ): import torch _lowerCAmelCase : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa ) pipe("""This is a test""" , do_sample=lowercase_ , top_p=0.5 ) def __A ( self ): _lowerCAmelCase : Any = """Hello world""" _lowerCAmelCase : Tuple = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" ) if text_generator.model.framework == "tf": _lowerCAmelCase : Union[str, Any] = logging.get_logger("""transformers.generation.tf_utils""" ) else: _lowerCAmelCase : int = logging.get_logger("""transformers.generation.utils""" ) _lowerCAmelCase : Optional[int] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(lowercase_ ) as cl: _lowerCAmelCase : Any = text_generator(lowercase_ , max_length=10 , max_new_tokens=1 ) self.assertIn(lowercase_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(lowercase_ ) as cl: _lowerCAmelCase : Tuple = text_generator(lowercase_ , max_new_tokens=1 ) self.assertNotIn(lowercase_ , cl.out ) with CaptureLogger(lowercase_ ) as cl: _lowerCAmelCase : Union[str, Any] = text_generator(lowercase_ , max_length=10 ) self.assertNotIn(lowercase_ , cl.out )
44
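# The deterministic path those tests pin down can be reproduced directly; the
# checkpoint and flags appear verbatim in the test file above.
from transformers import pipeline

generator = pipeline("text-generation", model="sshleifer/tiny-ctrl", framework="pt")
out = generator("This is a test", do_sample=False)
print(out[0]["generated_text"])  # starts with "This is a test" per the expectations above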
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ): UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: UpperCAmelCase = F"""{olid} is not a valid Open Library olid""" raise ValueError(lowercase_ ) return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json() def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} UpperCAmelCase = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] UpperCAmelCase = data['First sentence']['value'] for key, value in data.items(): if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = ', '.join(lowercase_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
78
0
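# Programmatic use of the two helpers above (requires network access); the olid
# is the default value from get_openlibrary_data's signature.
book = summarize_book(get_openlibrary_data("isbn/0140328726"))
for key, value in book.items():
    print(f"{key}: {value}")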
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Optional[int]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Any=None , ) ->str: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any]) ->int: '''simple 
docstring''' A__ = LlamaModel(config=lowercase_) model.to(lowercase_) model.eval() A__ = model(lowercase_ , attention_mask=lowercase_) A__ = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , ) ->Dict: '''simple docstring''' A__ = True A__ = LlamaModel(lowercase_) model.to(lowercase_) model.eval() A__ = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) A__ = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) A__ = model(lowercase_ , attention_mask=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , ) ->List[str]: '''simple docstring''' A__ = LlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , ) ->List[str]: '''simple docstring''' A__ = True A__ = True A__ = LlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() # first forward pass A__ = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0] A__ = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ = 
self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ = (LlamaForCausalLM,) if is_torch_available() else () UpperCAmelCase__ = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' A__ = LlamaModelTester(self) A__ = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A__ = type self.model_tester.create_and_check_model(*lowercase_) def SCREAMING_SNAKE_CASE ( self : str) ->List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(lowercase_) A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) A__ = LlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = '''single_label_classification''' A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(lowercase_) A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) A__ = LlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = '''multi_label_classification''' A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(lowercase_) A__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) A__ = LlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''') def SCREAMING_SNAKE_CASE 
( self : Dict) ->List[str]: '''simple docstring''' pass @parameterized.expand([('''linear''',), ('''dynamic''',)]) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ids_tensor([1, 10] , config.vocab_size) A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights A__ = LlamaModel(lowercase_) original_model.to(lowercase_) original_model.eval() A__ = original_model(lowercase_).last_hidden_state A__ = original_model(lowercase_).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights A__ = {'''type''': scaling_type, '''factor''': 10.0} A__ = LlamaModel(lowercase_) scaled_model.to(lowercase_) scaled_model.eval() A__ = scaled_model(lowercase_).last_hidden_state A__ = scaled_model(lowercase_).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''') @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''') A__ = model(torch.tensor([input_ids])) # Expected mean on dim = -1 A__ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]]) torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2) # slicing logits[0, 0, 0:30] # fmt: off A__ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,]) # fmt: on torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , rtol=1e-5) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''') @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''') A__ = model(torch.tensor(lowercase_)) # Expected mean on dim = -1 A__ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]]) torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2) # slicing logits[0, 0, 0:30] # fmt: off A__ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273]) # fmt: on torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , 
rtol=1e-5) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''') @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''') A__ = model(torch.tensor(lowercase_)) # Expected mean on dim = -1 A__ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]]) torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2) # slicing logits[0, 0, 0:30] # fmt: off A__ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513]) # fmt: on torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''') @slow def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''') A__ = model(torch.tensor(lowercase_)) A__ = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa) torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2) # fmt: off A__ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312]) # fmt: on torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , rtol=1e-5) @unittest.skip('''Model is curently gated''') @slow def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' A__ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' A__ = '''Simply put, the theory of relativity states that ''' A__ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''') A__ = tokenizer.encode(lowercase_ , return_tensors='''pt''') A__ = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=lowercase_) # greedy generation outputs A__ = model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_) A__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_) self.assertEqual(lowercase_ , lowercase_)
14
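# Minimal forward pass mirroring the tester defaults above (tiny random config):
import torch
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = LlamaModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))  # batch_size=13, seq_length=7
with torch.no_grad():
    out = model(input_ids)
print(out.last_hidden_state.shape)  # torch.Size([13, 7, 32])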
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[str] , lowercase_ :int , lowercase_ :Optional[int]=None , lowercase_ :List[str]=None ) -> str: UpperCAmelCase = data UpperCAmelCase = previous UpperCAmelCase = next_node def __str__( self :Optional[Any] ) -> str: return f"""{self.data}""" def UpperCAmelCase__ ( self :int ) -> int: return self.data def UpperCAmelCase__ ( self :List[str] ) -> Any: return self.next def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]: return self.previous class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :Optional[Any] ) -> str: UpperCAmelCase = head def __iter__( self :List[str] ) -> List[str]: return self def UpperCAmelCase__ ( self :int ) -> Any: if not self.current: raise StopIteration else: UpperCAmelCase = self.current.get_data() UpperCAmelCase = self.current.get_next() return value class A_ : """simple docstring""" def __init__( self :Union[str, Any] ) -> List[Any]: UpperCAmelCase = None # First node in list UpperCAmelCase = None # Last node in list def __str__( self :List[Any] ) -> Optional[Any]: UpperCAmelCase = self.head UpperCAmelCase = [] while current is not None: nodes.append(current.get_data() ) UpperCAmelCase = current.get_next() return " ".join(str(lowercase_ ) for node in nodes ) def __contains__( self :str , lowercase_ :int ) -> str: UpperCAmelCase = self.head while current: if current.get_data() == value: return True UpperCAmelCase = current.get_next() return False def __iter__( self :Tuple ) -> Dict: return LinkedListIterator(self.head ) def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]: if self.head: return self.head.get_data() return None def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: if self.tail: return self.tail.get_data() return None def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node ) -> None: if self.head is None: UpperCAmelCase = node UpperCAmelCase = node else: self.insert_before_node(self.head , lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :Node ) -> None: if self.head is None: self.set_head(lowercase_ ) else: self.insert_after_node(self.tail , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int ) -> None: UpperCAmelCase = Node(lowercase_ ) if self.head is None: self.set_head(lowercase_ ) else: self.set_tail(lowercase_ ) def UpperCAmelCase__ ( self :int , lowercase_ :Node , lowercase_ :Node ) -> None: UpperCAmelCase = node UpperCAmelCase = node.previous if node.get_previous() is None: UpperCAmelCase = node_to_insert else: UpperCAmelCase = node_to_insert UpperCAmelCase = node_to_insert def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node , lowercase_ :Node ) -> None: UpperCAmelCase = node UpperCAmelCase = node.next if node.get_next() is None: UpperCAmelCase = node_to_insert else: UpperCAmelCase = node_to_insert UpperCAmelCase = node_to_insert def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = 1 UpperCAmelCase = Node(lowercase_ ) UpperCAmelCase = self.head while node: if current_position == position: self.insert_before_node(lowercase_ , lowercase_ ) return current_position += 1 UpperCAmelCase = node.next self.insert_after_node(self.tail , lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int ) -> Node: UpperCAmelCase = self.head while node: if node.get_data() == item: return node UpperCAmelCase = node.get_next() raise Exception('Node not found' ) def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] ) -> Dict: if 
(node := self.get_node(lowercase_ )) is not None: if node == self.head: UpperCAmelCase = self.head.get_next() if node == self.tail: UpperCAmelCase = self.tail.get_previous() self.remove_node_pointers(lowercase_ ) @staticmethod def UpperCAmelCase__ ( lowercase_ :Node ) -> None: if node.get_next(): UpperCAmelCase = node.previous if node.get_previous(): UpperCAmelCase = node.next UpperCAmelCase = None UpperCAmelCase = None def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: return self.head is None def _lowerCAmelCase ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
78
0
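# Usage sketch for the doubly linked list above. The class names LinkedList and
# Node are assumptions (the definitions are mangled), but set_head,
# insert_after_node, self.tail and __contains__ all appear verbatim in the body.
lst = LinkedList()                       # hypothetical name for the list class
lst.set_head(Node(1))                    # hypothetical name for the node class
lst.insert_after_node(lst.tail, Node(2))
print(str(lst))  # "1 2"
print(2 in lst)  # True, via __contains__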
def circle_sort(collection: list) -> list:
    """Sort a list in place using the recursive circle-sort algorithm."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped

        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
201
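# Non-interactive usage of circle_sort above:
print(circle_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]
print(circle_sort([-7, 0, 7]))       # [-7, 0, 7]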
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[Any] , lowercase_ :int ) -> None: UpperCAmelCase = size UpperCAmelCase = [0] * size UpperCAmelCase = [0] * size @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return index | (index + 1) @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = value while index < self.size: UpperCAmelCase = self.get_prev(lowercase_ ) + 1 if current_left_border == index: UpperCAmelCase = value else: UpperCAmelCase = max(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = self.get_next(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int ) -> int: right -= 1 # Because of right is exclusive UpperCAmelCase = 0 while left <= right: UpperCAmelCase = self.get_prev(lowercase_ ) if left <= current_left: UpperCAmelCase = max(lowercase_ , self.tree[right] ) UpperCAmelCase = current_left else: UpperCAmelCase = max(lowercase_ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
78
0
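# Usage sketch for the range-max structure above; query(left, right) is
# right-exclusive, matching the "right -= 1" at the top of query().
tree = MaxFenwickTree(5)
tree.update(0, 10)
tree.update(3, 7)
print(tree.query(0, 5))  # 10
print(tree.query(1, 4))  # 7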
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] _snake_case = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] _snake_case = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): _snake_case = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str: UpperCAmelCase = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } UpperCAmelCase = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): UpperCAmelCase = token_dict['token'] UpperCAmelCase = Tokenizer(Unigram() ) UpperCAmelCase = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) , ' ' ), normalizers.Lowercase(), ] ) UpperCAmelCase = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ), pre_tokenizers.Digits(individual_digits=lowercase_ ), pre_tokenizers.Punctuation(), ] ) UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ) UpperCAmelCase = TemplateProcessing( single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , ) UpperCAmelCase = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [files] self._tokenizer.train(lowercase_ , trainer=lowercase_ ) self.add_unk_id() def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ ) self.add_unk_id() def UpperCAmelCase__ ( self :Union[str, Any] ) -> int: UpperCAmelCase = json.loads(self._tokenizer.to_str() ) UpperCAmelCase = self.special_tokens['unk']['id'] UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
78
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def lowercase ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Dict , _snake_case : List[str] ) ->str: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Tuple = getattr(lowercase_ , lowercase_ ) if weight_type is not None: __snake_case : Any = getattr(lowercase_ , lowercase_ ).shape else: __snake_case : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : int = value elif weight_type == "weight_v": __snake_case : List[Any] = value elif weight_type == "bias": __snake_case : Union[str, Any] = value else: __snake_case : Dict = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] ) ->int: """simple docstring""" __snake_case : List[Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : List[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Optional[int] = False if "conv_layers" in name: load_conv_layer( lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[int] = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : Optional[Any] = True if "*" in mapped_key: __snake_case : str = name.split(lowercase_ )[0].split('''.''' )[-2] __snake_case : List[Any] = mapped_key.replace('''*''' , lowercase_ ) if "weight_g" in name: __snake_case : List[str] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "weight" in name: __snake_case : int = '''weight''' elif "bias" in name: __snake_case : str = '''bias''' else: __snake_case : Any = None set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Tuple , _snake_case : Any , _snake_case : List[Any] , _snake_case : int , _snake_case : List[str] ) ->int: """simple docstring""" __snake_case : str = full_name.split('''conv_layers.''' )[-1] __snake_case : Dict = name.split('''.''' ) __snake_case : Any = int(items[0] ) __snake_case : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __snake_case : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase_ ) def lowercase ( _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) ->Optional[Any]: """simple docstring""" __snake_case : List[Any] = SEWConfig() if is_finetuned: __snake_case : Union[str, Any] = model.wav_encoder.wav_model.cfg else: __snake_case : List[Any] = model.cfg __snake_case : List[Any] = fs_config.conv_bias __snake_case : int = eval(fs_config.conv_feature_layers ) __snake_case : Dict = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : List[Any] = [x[2] for x in conv_layers] __snake_case : List[Any] = '''gelu''' __snake_case : Dict = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' __snake_case : str = 0.0 __snake_case : Dict = fs_config.activation_fn.name __snake_case : str = fs_config.encoder_embed_dim __snake_case : List[str] = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Optional[Any] = 1e-5 __snake_case : List[Any] = fs_config.encoder_layerdrop __snake_case : List[Any] = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Any = fs_config.conv_pos __snake_case : Optional[Any] = len(lowercase_ ) __snake_case : Optional[Any] = fs_config.encoder_layers __snake_case : str = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : str = model.cfg __snake_case : int = fs_config.final_dropout __snake_case : Optional[Any] = fs_config.layerdrop __snake_case : Dict = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : Union[str, Any] = fs_config.dropout_input __snake_case : Union[str, Any] = fs_config.dropout __snake_case : Tuple = fs_config.mask_channel_length __snake_case : Dict = fs_config.mask_channel_prob __snake_case : Optional[Any] = fs_config.mask_length __snake_case : List[Any] = fs_config.mask_prob __snake_case : str = '''Wav2Vec2FeatureExtractor''' __snake_case : Optional[Any] = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def lowercase ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=None , _snake_case : Optional[Any]=True ) ->Optional[int]: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(lowercase_ ) else: __snake_case : Tuple = convert_config(model[0] , lowercase_ ) __snake_case : Optional[Any] = model[0].eval() __snake_case : List[Any] = True if config.feat_extract_norm == '''layer''' else False 
__snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , ) if is_finetuned: if dict_path: __snake_case : List[Any] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : str = target_dict.pad_index __snake_case : str = target_dict.bos_index __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Dict = target_dict.bos_index __snake_case : int = target_dict.eos_index __snake_case : Dict = len(target_dict.symbols ) __snake_case : Dict = os.path.join(lowercase_ , '''vocab.json''' ) if not os.path.isdir(lowercase_ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase_ ) ) return os.makedirs(lowercase_ , exist_ok=lowercase_ ) with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , lowercase_ ) __snake_case : str = WavaVecaCTCTokenizer( lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase_ , ) __snake_case : int = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) __snake_case : Tuple = SEWForCTC(lowercase_ ) else: __snake_case : Optional[int] = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ , lowercase_ , lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
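At its core, the conversion script above is a key-remapping loop over a source `state_dict`: each fairseq name is rewritten through a `MAPPING` table and the tensor is copied across. A generic sketch of that pattern follows; the mapping entries and tensor names are illustrative, not SEW's real ones.

```python
import torch

MAPPING = {
    "fc1": "feed_forward.intermediate_dense",
    "fc2": "feed_forward.output_dense",
}


def remap_state_dict(source: dict) -> dict:
    target = {}
    for name, tensor in source.items():
        new_name = name
        for old, new in MAPPING.items():
            if old in new_name:
                new_name = new_name.replace(old, new)
        target[new_name] = tensor
    return target


source = {
    "layers.0.fc1.weight": torch.zeros(4, 2),
    "layers.0.fc2.bias": torch.zeros(2),
}
print(list(remap_state_dict(source)))
# ['layers.0.feed_forward.intermediate_dense.weight',
#  'layers.0.feed_forward.output_dense.bias']
```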
102
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _lowerCAmelCase ( lowercase_ = 8 ): UpperCAmelCase = ascii_letters + digits + punctuation return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(lowercase_ ) UpperCAmelCase = i // 3 UpperCAmelCase = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) UpperCAmelCase = ( chars_incl + random(lowercase_ , quotient + remainder ) + random(lowercase_ , lowercase_ ) + random(lowercase_ , lowercase_ ) ) UpperCAmelCase = list(lowercase_ ) shuffle(lowercase_ ) return "".join(lowercase_ ) # random is a generalised function for letters, characters and numbers def _lowerCAmelCase ( lowercase_ , lowercase_ ): return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ ): pass # Put your code here... def _lowerCAmelCase ( lowercase_ , lowercase_ = 8 ): if len(lowercase_ ) < min_length: # Your Password must be at least 8 characters long return False UpperCAmelCase = any(char in ascii_uppercase for char in password ) UpperCAmelCase = any(char in ascii_lowercase for char in password ) UpperCAmelCase = any(char in digits for char in password ) UpperCAmelCase = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def _lowerCAmelCase ( ): UpperCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() ) UpperCAmelCase = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(lowercase_ ) ) print( 'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
78
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm A__ : List[str] = logging.get_logger(__name__) @dataclass class __snake_case ( SCREAMING_SNAKE_CASE_ ): _a = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : List[str] , **A_ : str): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowerCAmelCase_ : Optional[Any] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_)) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""") lowerCAmelCase_ : Optional[int] = kwargs.pop('''torchscript''' , self.torchscript) lowerCAmelCase_ : Any = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics) lowerCAmelCase_ : Tuple = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level) super().__init__(**lowercase_) _a = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'''help''': '''Trace the models using torchscript'''} ) _a = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) _a = field( default='''O1''' ,metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } ,) @cached_property def UpperCAmelCase__ ( self : Any): requires_backends(self , ['''torch''']) logger.info('''PyTorch: setting up devices''') if not self.cuda: lowerCAmelCase_ : Union[str, Any] = torch.device('''cpu''') lowerCAmelCase_ : str = 0 elif is_torch_tpu_available(): lowerCAmelCase_ : List[str] = xm.xla_device() lowerCAmelCase_ : List[Any] = 0 else: lowerCAmelCase_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''') lowerCAmelCase_ : Optional[int] = torch.cuda.device_count() return device, n_gpu @property def UpperCAmelCase__ ( self : Tuple): return is_torch_tpu_available() and self.tpu @property def UpperCAmelCase__ ( self : List[str]): requires_backends(self , ['''torch''']) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def UpperCAmelCase__ ( self : Tuple): requires_backends(self , ['''torch''']) return self._setup_devices[0] @property def UpperCAmelCase__ ( self : Dict): requires_backends(self , ['''torch''']) return self._setup_devices[1] @property def UpperCAmelCase__ ( self : Optional[int]): return self.n_gpu > 0
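The `no_xxx` handling at the top of `__init__` above is a general deprecation shim: a negated legacy flag is inverted into its positive field before normal initialization proceeds. A standalone sketch (field names invented):

```python
class Args:
    DEPRECATED = ("no_inference", "no_cuda", "no_tpu")

    def __init__(self, **kwargs):
        defaults = {"inference": True, "cuda": True, "tpu": True}
        for deprecated in self.DEPRECATED:
            if deprecated in kwargs:
                positive = deprecated[3:]  # drop the "no_" prefix
                defaults[positive] = not kwargs.pop(deprecated)
        defaults.update(kwargs)
        for name, value in defaults.items():
            setattr(self, name, value)


print(Args(no_cuda=True).cuda)          # False
print(Args(inference=False).inference)  # False
```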
103
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : """simple docstring""" def UpperCAmelCase__ ( self :Any ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self :List[Any] ) -> Any: torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, 
"scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self :List[str] ) -> str: UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = inputs['prompt'] UpperCAmelCase = inputs['generator'] UpperCAmelCase = inputs['num_inference_steps'] UpperCAmelCase = inputs['output_type'] if "image" in inputs: UpperCAmelCase = inputs['image'] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['mask_image'] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['original_image'] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ ) # inputs with prompt converted to embeddings UpperCAmelCase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = inputs['generator'] UpperCAmelCase = inputs['num_inference_steps'] UpperCAmelCase = inputs['output_type'] # inputs with prompt converted to embeddings UpperCAmelCase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**lowercase_ )[0] UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max() self.assertLess(lowercase_ , 1E-4 ) def UpperCAmelCase__ ( self :List[Any] ) -> str: UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(lowercase_ ) UpperCAmelCase = pipe_loaded(**lowercase_ )[0] UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) 
).max() self.assertLess(lowercase_ , 1E-4 )
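The test's core is a save/load round-trip with a max-absolute-difference bound. Stripped of the diffusers specifics, it reduces to this sketch with a plain torch module:

```python
import tempfile

import numpy as np
import torch

model = torch.nn.Linear(4, 2)
model.eval()
x = torch.randn(1, 4)

with torch.no_grad():
    before = model(x).numpy()

with tempfile.TemporaryDirectory() as tmpdir:
    path = f"{tmpdir}/model.pt"
    torch.save(model.state_dict(), path)
    reloaded = torch.nn.Linear(4, 2)
    reloaded.load_state_dict(torch.load(path))
    reloaded.eval()

with torch.no_grad():
    after = reloaded(x).numpy()

assert np.abs(before - after).max() < 1e-4
```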
78
0
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase , lowerCAmelCase = 9, 14 # noqa: F841 lowerCAmelCase = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] lowerCAmelCase = defaultdict(lowercase_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) lowerCAmelCase = mst(lowercase_ ) lowerCAmelCase = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: lowerCAmelCase = tuple(answer[:2] ) lowerCAmelCase = tuple(edge[::-1] ) assert edge in result or reverse in result
46
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case_ = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ): UpperCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _lowerCAmelCase ( lowercase_ , lowercase_=512 , lowercase_=512 ): UpperCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1 UpperCAmelCase = np.transpose(lowercase_ , [2, 0, 1] ) UpperCAmelCase = torch.from_numpy(lowercase_ ).unsqueeze(0 ) return image class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]: super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]: # get the original timestep using init_timestep UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ ) UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any: if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" ) UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ ) UpperCAmelCase = batch_size * num_images_per_prompt if image.shape[1] 
== 4: UpperCAmelCase = image else: if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ ) ] UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) else: UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ ) UpperCAmelCase = self.movq.config.scaling_factor * init_latents UpperCAmelCase = torch.cat([init_latents] , dim=0 ) UpperCAmelCase = init_latents.shape UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = init_latents return latents def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) UpperCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. 
UpperCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase__ ( self :List[Any] ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]: UpperCAmelCase = self._execution_device UpperCAmelCase = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase = image_embeds.shape[0] if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) if not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [image] if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 ) UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ ) UpperCAmelCase = self.movq.encode(lowercase_ )['latents'] UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) UpperCAmelCase = self.prepare_latents( lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase = {'image_embeds': image_embeds} UpperCAmelCase = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 ) UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 ) UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase = image * 0.5 + 0.5 UpperCAmelCase = image.clamp(0 , 1 ) UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
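The guidance arithmetic buried in the denoising loop above is worth isolating: the batch holds an unconditional and a conditional prediction, and the final noise estimate extrapolates from one toward the other. Shapes below are illustrative, and the real loop additionally carries the learned-variance channels along.

```python
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 64, 64)  # stand-in for the UNet output

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])
```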
78
0
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCamelCase_ : Tuple = 16 lowerCamelCase_ : List[Any] = 32 def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase = 16 , _UpperCAmelCase = "bert-base-cased" ): """simple docstring""" A_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ ) A_ : List[Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(_UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase_ , max_length=lowercase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A_ : Tuple = datasets.map( lowercase_ , batched=lowercase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : Dict = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase_ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(lowercase_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
A_ : Any = DataLoader( tokenized_datasets['train'] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) A_ : List[Any] = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) return train_dataloader, eval_dataloader def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" A_ : str = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Tuple = config['lr'] A_ : List[str] = int(config['num_epochs'] ) A_ : Tuple = int(config['seed'] ) A_ : Tuple = int(config['batch_size'] ) A_ : List[Any] = args.model_name_or_path set_seed(lowercase_ ) A_ , A_ : int = get_dataloaders(lowercase_ , lowercase_ , lowercase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(lowercase_ , return_dict=lowercase_ ) # Instantiate optimizer A_ : Dict = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A_ : Tuple = optimizer_cls(params=model.parameters() , lr=lowercase_ ) if accelerator.state.deepspeed_plugin is not None: A_ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A_ : int = 1 A_ : Any = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A_ : Any = get_linear_schedule_with_warmup( optimizer=lowercase_ , num_warmup_steps=0 , num_training_steps=lowercase_ , ) else: A_ : Tuple = DummyScheduler(lowercase_ , total_num_steps=lowercase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Dict = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # We need to keep track of how many total steps we have iterated over A_ : List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly A_ : Any = 0 # Now we train the model A_ : Optional[int] = evaluate.load('glue' , 'mrpc' ) A_ : Any = 0 A_ : Tuple = {} for epoch in range(lowercase_ , lowercase_ ): model.train() for step, batch in enumerate(lowercase_ ): A_ : List[Any] = model(**lowercase_ ) A_ : str = outputs.loss A_ : str = loss / gradient_accumulation_steps accelerator.backward(lowercase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A_ : int = 0 for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A_ : Dict = model(**lowercase_ ) A_ : Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A_ , A_ : str = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase_ ) - 1: A_ : str = predictions[: len(eval_dataloader.dataset ) - samples_seen] A_ : str = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase_ , references=lowercase_ , ) A_ : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase_ ) A_ : List[Any] = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A_ : Any = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( ): """simple docstring""" A_ : Optional[int] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=lowercase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowercase_ , ) parser.add_argument( '--output_dir' , type=lowercase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=lowercase_ , default=lowercase_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=lowercase_ , default=3 , help='Number of train epochs.' , ) A_ : Optional[int] = parser.parse_args() A_ : Union[str, Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
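Stripped of GLUE, DeepSpeed, and the metric bookkeeping, the training loop above follows the standard accelerate skeleton; the toy model and data here are invented for illustration.

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
dataloader = torch.utils.data.DataLoader(
    [(torch.randn(8), torch.tensor(0)) for _ in range(32)], batch_size=4
)

# prepare() wraps everything for the current device / distributed setup
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

model.train()
for inputs, labels in dataloader:
    logits = model(inputs)
    loss = torch.nn.functional.cross_entropy(logits, labels)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```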
286
"""simple docstring""" import colorsys from PIL import Image # type: ignore def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = x UpperCAmelCase = y for step in range(lowercase_ ): # noqa: B007 UpperCAmelCase = a * a - b * b + x UpperCAmelCase = 2 * a * b + y UpperCAmelCase = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowerCAmelCase ( lowercase_ ): if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def _lowerCAmelCase ( lowercase_ ): if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) ) def _lowerCAmelCase ( lowercase_ = 800 , lowercase_ = 600 , lowercase_ = -0.6 , lowercase_ = 0 , lowercase_ = 3.2 , lowercase_ = 50 , lowercase_ = True , ): UpperCAmelCase = Image.new('RGB' , (image_width, image_height) ) UpperCAmelCase = img.load() # loop through the image-coordinates for image_x in range(lowercase_ ): for image_y in range(lowercase_ ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase = figure_width / image_width * image_height UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase = get_distance(lowercase_ , lowercase_ , lowercase_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase = get_color_coded_rgb(lowercase_ ) else: UpperCAmelCase = get_black_and_white_rgb(lowercase_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure snake_case_ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
78
0
"""simple docstring""" def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): UpperCAmelCase : Any = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): UpperCAmelCase : str = n - k # Calculate C(n,k) for i in range(lowercase_ ): result *= n - i result //= i + 1 return result def _snake_case ( UpperCamelCase : List[str] ): return binomial_coefficient(2 * node_count , lowercase_ ) // (node_count + 1) def _snake_case ( UpperCamelCase : Dict ): if n < 0: raise ValueError("""factorial() not defined for negative values""" ) UpperCAmelCase : Tuple = 1 for i in range(1 , n + 1 ): result *= i return result def _snake_case ( UpperCamelCase : Optional[int] ): return catalan_number(lowercase_ ) * factorial(lowercase_ ) if __name__ == "__main__": A: Tuple = int(input("Enter the number of nodes: ").strip() or 0) if node_count <= 0: raise ValueError("We need some nodes to work with.") print( f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """ f"""binary trees and {catalan_number(node_count)} binary search trees.""" )
109
"""simple docstring""" import requests snake_case_ = """""" # <-- Put your OpenWeatherMap appid here! snake_case_ = """https://api.openweathermap.org/data/2.5/""" def _lowerCAmelCase ( lowercase_ = "Chicago" , lowercase_ = APPID ): return requests.get(URL_BASE + 'weather' , params=locals() ).json() def _lowerCAmelCase ( lowercase_ = "Kolkata, India" , lowercase_ = APPID ): return requests.get(URL_BASE + 'forecast' , params=locals() ).json() def _lowerCAmelCase ( lowercase_ = 5_5.6_8 , lowercase_ = 1_2.5_7 , lowercase_ = APPID ): return requests.get(URL_BASE + 'onecall' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: snake_case_ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
78
0
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the JSON record for an Open Library olid such as 'isbn/...'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
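`summarize_book` only needs a dict shaped like the Open Library response, so it can be exercised offline; the record below is a hand-built sample (authors left empty to avoid the per-author API lookups).

```python
sample = {
    "title": "Matilda",
    "publish_date": "1988",
    "authors": [],  # kept empty so no network calls are made
    "number_of_pages": 240,
    "first_sentence": {"value": "It's a funny thing about mothers and fathers."},
    "isbn_10": ["0140328726"],
    "isbn_13": ["9780140328721"],
}
print(summarize_book(sample))
```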
346
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = ["""image_processor""", """tokenizer"""] __UpperCamelCase = """LayoutLMv2ImageProcessor""" __UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowercase_ , ) UpperCAmelCase = kwargs.pop('feature_extractor' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowercase_ , lowercase_ ) def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCAmelCase = features['words'] UpperCAmelCase = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # add pixel values UpperCAmelCase = features.pop('pixel_values' ) if return_overflowing_tokens is True: UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] ) UpperCAmelCase = images return encoded_inputs def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image UpperCAmelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f""" {len(lowercase_ )} and {len(lowercase_ )}""" ) return images_with_overflow def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple: return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]: return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def UpperCAmelCase__ ( self :int ) -> Optional[int]: return ["input_ids", "bbox", "attention_mask", "image"] @property def UpperCAmelCase__ ( self :int ) -> Dict: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , ) return self.image_processor_class @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , ) return self.image_processor
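A hypothetical end-to-end call of the processor above; the checkpoint id and image path are placeholders, and `apply_ocr=True` (the image processor default) additionally requires pytesseract to be installed.

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")

# with apply_ocr enabled, words and boxes are extracted from the image,
# so only the image itself is required here
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image
```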
78
0
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 1 unless both inputs are 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
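NAND is functionally complete, so other gates can be built from `nand_gate` alone; for instance:

```python
def not_gate(a: int) -> int:
    return nand_gate(a, a)


def and_gate(a: int, b: int) -> int:
    return nand_gate(nand_gate(a, b), nand_gate(a, b))


assert [not_gate(0), not_gate(1)] == [1, 0]
assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]
```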
35
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class A_ : """simple docstring""" def __init__( self :Union[str, Any] ) -> str: UpperCAmelCase = {} def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict=1 ) -> List[Any]: if self.graph.get(lowercase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: UpperCAmelCase = [[w, v]] if not self.graph.get(lowercase_ ): UpperCAmelCase = [] def UpperCAmelCase__ ( self :Any ) -> Optional[int]: return list(self.graph ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[Any] ) -> Dict: if self.graph.get(lowercase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :Tuple=-2 , lowercase_ :List[Any]=-1 ) -> List[Any]: if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return visited def UpperCAmelCase__ ( self :List[str] , lowercase_ :int=-1 ) -> Tuple: if c == -1: UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(lowercase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase_ , lowercase_ , 1 ) def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any]=-2 ) -> Optional[int]: UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(lowercase_ ) visited.append(lowercase_ ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[int] ) -> List[Any]: UpperCAmelCase = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> List[str]: return len(self.graph[u] ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any=-2 ) -> int: UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return sorted_nodes def UpperCAmelCase__ ( self :str ) -> str: UpperCAmelCase = [] UpperCAmelCase = [] 
UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return list(lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return False def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int=-2 , lowercase_ :List[str]=-1 ) -> Any: UpperCAmelCase = time() self.dfs(lowercase_ , lowercase_ ) UpperCAmelCase = time() return end - begin def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[str]=-2 ) -> str: UpperCAmelCase = time() self.bfs(lowercase_ ) UpperCAmelCase = time() return end - begin class A_ : """simple docstring""" def __init__( self :List[str] ) -> Union[str, Any]: UpperCAmelCase = {} def UpperCAmelCase__ ( self :str , lowercase_ :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int]=1 ) -> Dict: # check if the u exists if self.graph.get(lowercase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist UpperCAmelCase = [[w, v]] # add the other way if self.graph.get(lowercase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist UpperCAmelCase = [[w, u]] def UpperCAmelCase__ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :Tuple ) -> Optional[Any]: if self.graph.get(lowercase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase_ ) # the other way round if self.graph.get(lowercase_ 
): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int]=-2 , lowercase_ :Optional[int]=-1 ) -> List[str]: if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return visited def UpperCAmelCase__ ( self :List[str] , lowercase_ :Optional[int]=-1 ) -> Any: if c == -1: UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(lowercase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase_ , lowercase_ , 1 ) def UpperCAmelCase__ ( self :Dict , lowercase_ :int=-2 ) -> int: UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(lowercase_ ) visited.append(lowercase_ ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[Any] ) -> str: return len(self.graph[u] ) def UpperCAmelCase__ ( self :Optional[Any] ) -> Any: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(lowercase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return list(lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> str: UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(lowercase_ ) visited.append(lowercase_ ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = 
len(lowercase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(lowercase_ ) != 0: UpperCAmelCase = stack[len(lowercase_ ) - 1] else: UpperCAmelCase = False indirect_parents.append(lowercase_ ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(lowercase_ ) == 0: return False def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]: return list(self.graph ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Union[str, Any]=-2 , lowercase_ :List[str]=-1 ) -> str: UpperCAmelCase = time() self.dfs(lowercase_ , lowercase_ ) UpperCAmelCase = time() return end - begin def UpperCAmelCase__ ( self :Any , lowercase_ :int=-2 ) -> str: UpperCAmelCase = time() self.bfs(lowercase_ ) UpperCAmelCase = time() return end - begin
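# The graph sample above implements add_pair/remove_pair, DFS, BFS, in/out
# degree, topological sort, and a DFS-based cycle finder, but the mangled
# identifiers obscure the logic. A minimal readable sketch of the cycle check
# (an assumed adjacency-list dict, not the original class API):
def has_cycle(graph: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on the current DFS path / done
    color = {node: WHITE for node in graph}

    def visit(node) -> bool:
        color[node] = GRAY
        for neighbor in graph.get(node, ()):
            if color.get(neighbor, WHITE) == GRAY:  # back edge => cycle
                return True
            if color.get(neighbor, WHITE) == WHITE and visit(neighbor):
                return True
        color[node] = BLACK
        return False

    return any(color[node] == WHITE and visit(node) for node in list(graph))


assert has_cycle({0: [1], 1: [2], 2: [0]}) is True
assert has_cycle({0: [1], 1: [2], 2: []}) is False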
78
0
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Dict = " " ) -> Optional[int]: _lowerCAmelCase : Any = [] _lowerCAmelCase : Union[str, Any] = 0 for index, char in enumerate(lowercase_ ): if char == separator: split_words.append(string[last_index:index] ) _lowerCAmelCase : Dict = index + 1 elif index + 1 == len(lowercase_ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
44
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
78
0
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """simple docstring"""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """simple docstring"""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """simple docstring"""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """simple docstring"""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """simple docstring"""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """simple docstring"""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    """simple docstring"""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    """simple docstring"""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
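# The same batch-gradient step as the loops above, vectorized with NumPy for
# clarity. A sketch only: it reuses train_data, parameter_vector and
# LEARNING_RATE from the reconstruction and is not part of the original file.
import numpy as np

X = np.array([features for features, _ in train_data], dtype=float)  # (m, 3)
y = np.array([target for _, target in train_data], dtype=float)      # (m,)
Xb = np.hstack([np.ones((len(X), 1)), X])  # prepend a bias column for theta_0

theta = np.array(parameter_vector, dtype=float)
residual = Xb @ theta - y                  # error for every training example
gradient = Xb.T @ residual / len(y)        # batch gradient of the squared loss
theta = theta - LEARNING_RATE * gradient   # one gradient-descent update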
14
"""simple docstring""" def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ): UpperCAmelCase = [] UpperCAmelCase = 0 for index, char in enumerate(lowercase_ ): if char == separator: split_words.append(string[last_index:index] ) UpperCAmelCase = index + 1 elif index + 1 == len(lowercase_ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
78
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
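# A minimal sketch of the lazy-import pattern the __init__ above relies on:
# attributes resolve to their submodule on first access. This is the idea
# only, not the actual transformers._LazyModule implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module(
            "." + self._symbol_to_module[symbol], self.__name__
        )
        return getattr(module, symbol)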
201
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) snake_case_ = logging.getLogger(__name__) def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = git.Repo(search_parent_directories=lowercase_ ) UpperCAmelCase = { 'repo_id': str(lowercase_ ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def _lowerCAmelCase ( lowercase_ ): if params.n_gpu <= 0: UpperCAmelCase = 0 UpperCAmelCase = -1 UpperCAmelCase = True UpperCAmelCase = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 UpperCAmelCase = int(os.environ['WORLD_SIZE'] ) UpperCAmelCase = int(os.environ['N_GPU_NODE'] ) UpperCAmelCase = int(os.environ['RANK'] ) # number of nodes / node ID UpperCAmelCase = params.world_size // params.n_gpu_per_node UpperCAmelCase = params.global_rank // params.n_gpu_per_node UpperCAmelCase = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 1 UpperCAmelCase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode UpperCAmelCase = params.node_id == 0 and params.local_rank == 0 UpperCAmelCase = params.n_nodes > 1 # summary UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' , backend='nccl' , ) def _lowerCAmelCase ( lowercase_ ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
78
0
'''simple docstring''' import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[str]=100 , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Optional[Any]=30 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : str=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[Any]=32 , __UpperCAmelCase : int=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[int]=37 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : str=3 , ): '''simple docstring''' _A = parent _A = vocab_size _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = num_patches + 1 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = FlaxBeitModel(config=__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ): '''simple docstring''' _A = FlaxBeitForMaskedImageModeling(config=__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ): '''simple docstring''' _A = self.type_sequence_label_size _A = 
FlaxBeitForImageClassification(config=__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _A = 1 _A = FlaxBeitForImageClassification(__UpperCAmelCase ) _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(__UpperCAmelCase ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = FlaxBeitModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ) _A = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _A = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _A = model_class(__UpperCAmelCase ) @jax.jit def model_jitted(__UpperCAmelCase : int , **__UpperCAmelCase : str ): return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase ) with self.subTest("JIT Enabled" ): _A = model_jitted(**__UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _A = model_jitted(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: _A = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) _A = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__UpperCAmelCase ) def __lowercase ( ) -> int: '''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase ( self : Union[str, Any] ): '''simple 
docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__UpperCAmelCase , return_tensors="np" ).pixel_values # prepare bool_masked_pos _A = np.ones((1, 196) , dtype=__UpperCAmelCase ) # forward pass _A = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase ) _A = outputs.logits # verify the logits _A = (1, 196, 8192) self.assertEqual(logits.shape , __UpperCAmelCase ) _A = np.array( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__UpperCAmelCase , return_tensors="np" ) # forward pass _A = model(**__UpperCAmelCase ) _A = outputs.logits # verify the logits _A = (1, 1000) self.assertEqual(logits.shape , __UpperCAmelCase ) _A = np.array([-1.2385, -1.0987, -1.0108] ) self.assertTrue(np.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) _A = 281 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__UpperCAmelCase , return_tensors="np" ) # forward pass _A = model(**__UpperCAmelCase ) _A = outputs.logits # verify the logits _A = (1, 21841) self.assertEqual(logits.shape , __UpperCAmelCase ) _A = np.array([1.6881, -0.2787, 0.5901] ) self.assertTrue(np.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) _A = 2396 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
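# The "JIT Enabled"/"JIT Disabled" subtests above compare a jitted forward
# pass against eager execution. The pattern in isolation, with a stand-in
# function rather than the Beit model:
import jax
import jax.numpy as jnp


def forward(pixel_values):
    return (pixel_values * 2.0).sum(axis=-1)  # placeholder for model(...)


jitted_forward = jax.jit(forward)
x = jnp.ones((1, 3, 4))
jitted_out = jitted_forward(x)
with jax.disable_jit():
    eager_out = forward(x)
assert jitted_out.shape == eager_out.shape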
79
'''simple docstring'''


def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
    print(jaccard_similarity(set_a, set_b))
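# A worked check for the sets in the __main__ block above:
# |A ∩ B| = |{c, d, e}| = 3 and |A ∪ B| = 5 + 6 - 3 = 8, so J(A, B) = 3/8.
assert jaccard_similarity({'a', 'b', 'c', 'd', 'e'}, {'c', 'd', 'e', 'f', 'h', 'i'}) == 0.375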
79
1
'''simple docstring'''


def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    '''simple docstring'''
    if not isinstance(input_str, str):
        msg = F'''Expected string as input, found {type(input_str)}'''
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = F'''Expected boolean as use_pascal parameter, found {type(use_pascal)}'''
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
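# Expected behavior of the reconstruction above, for reference:
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"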
79
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 0 snake_case = False snake_case = 3.0 class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def lowerCAmelCase ( self : int ): '''simple docstring''' _A = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _A = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _A = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , __UpperCAmelCase ) @require_multi_gpu def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCamelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCamelCase_ = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCamelCase_ = torch.nn.Linear(1_00, 2_00) lowerCamelCase_ = accelerator.prepare(model) # Check the values changed in kwargs lowerCamelCase_ = '''''' lowerCamelCase_ = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
79
1
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = GPTSwaTokenizer snake_case = False snake_case = True snake_case = False def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _A = GPTSwaTokenizer(__UpperCAmelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict ): '''simple docstring''' _A = "This is a test" _A = "This is a test" return input_text, output_text def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = "<s>" _A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__UpperCAmelCase ) , 2000 ) def lowerCAmelCase ( self : str ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = GPTSwaTokenizer(__UpperCAmelCase ) _A = tokenizer.tokenize("This is a test" ) self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [465, 287, 265, 631, 842] ) _A = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( __UpperCAmelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on _A = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) _A = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) # fmt: off self.assertListEqual( __UpperCAmelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = GPTSwaTokenizer(__UpperCAmelCase ) _A = ["This is a test", "I was born in 92000, and this is falsé."] _A = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertListEqual(tokenizer.encode_fast(__UpperCAmelCase ) , __UpperCAmelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(tokenizer.decode_fast(__UpperCAmelCase ) , __UpperCAmelCase ) @slow def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off _A = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__UpperCAmelCase , )
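# Why the test above expects "<0xC3>", "<0xA9>" for "é": with SentencePiece
# byte fallback, characters missing from the vocabulary are emitted as their
# raw UTF-8 bytes, one <0xNN> token per byte.
assert "é".encode("utf-8") == b"\xc3\xa9"  # "é" -> two byte-fallback tokens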
79
'''simple docstring'''


def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(F"""{solution() = }""")
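# A brute-force cross-check of the closed forms used above,
# sum i = n(n+1)/2 and sum i^2 = n(n+1)(2n+1)/6:
for n in range(1, 50):
    brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
    assert solution(n) == brute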
79
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
79
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , 
train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
79
1
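The masked-image-modeling script above builds its boolean patch masks with a `MaskGenerator`, whose implementation lives outside this listing. As a hedged illustration only — modeled on SimMIM's reference data pipeline, not taken from this file — a minimal random patch-mask generator consistent with the constructor arguments used above could look like this:

```python
# Minimal sketch of a SimMIM-style mask generator (an assumption: the real
# class used by the script may differ in details such as rounding behavior).
import numpy as np

class MaskGenerator:
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        if input_size % mask_patch_size != 0 or mask_patch_size % model_patch_size != 0:
            raise ValueError("input_size / mask_patch_size / model_patch_size must divide evenly")
        self.rand_size = input_size // mask_patch_size      # coarse mask grid resolution
        self.scale = mask_patch_size // model_patch_size    # upsample factor to model patches
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * mask_ratio))

    def __call__(self):
        # pick `mask_count` coarse patches at random, then upsample to model-patch resolution
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape(self.rand_size, self.rand_size)
        return mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
```

The script calls one such generator per example inside `preprocess_images`, so every image gets an independent random mask each epoch.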
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ): '''simple docstring''' super().__init__() _A = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ) else: _A = None _A = torch.nn.Parameter(__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1 # get prompt text embeddings _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _A = text_input_ids[:, : self.tokenizer.model_max_length] _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _A = self.learned_classifier_free_sampling_embeddings.embeddings _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 ) else: _A = [""] * batch_size _A = text_input_ids.shape[-1] _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = negative_prompt_embeds.shape[1] _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 ) _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = len(__UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' ) _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _A = (batch_size, self.transformer.num_latent_pixels) if latents is None: _A = self.transformer.num_vector_embeds - 1 _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0," f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps.to(self.device ) _A = latents for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the sample if we are doing classifier free guidance _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample if do_classifier_free_guidance: _A , _A = model_output.chunk(2 ) _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase ) _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase ) # remove `log(0)`'s (`-inf`s) _A = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = self.vqvae.config.vq_embed_dim _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase ) _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ): '''simple docstring''' _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase ) _A = torch.exp(__UpperCAmelCase ) _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase ) _A = torch.cat((all_true, keep_mask) , dim=1 ) _A = keep_mask[:, :-1, :] _A = keep_mask.gather(1 , indices.argsort(1 ) ) _A = log_p_x_0.clone() _A = -torch.inf # -inf = log(0) return rv
79
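For context, here is a hedged sketch of how a pipeline like the one above is typically driven. The import path and checkpoint name are assumptions (`microsoft/vq-diffusion-ithq` is one published VQ-Diffusion model), not something this listing guarantees:

```python
import torch
from diffusers import VQDiffusionPipeline  # assumed public name of the pipeline class above

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")  # illustrative checkpoint
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# guidance_scale > 1.0 enables the classifier-free guidance branch in __call__ above
image = pipe("teddy bear playing in the pool", guidance_scale=5.0, num_inference_steps=100).images[0]
image.save("sample.png")
```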
'''simple docstring''' from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = '''canine'''

    def __init__( self : Dict , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : str=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : int=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]=16384 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Dict=1E-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : int=0xE000 , __UpperCAmelCase : List[Any]=0xE001 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : int=16384 , __UpperCAmelCase : Union[str, Any]=128 , **__UpperCAmelCase : Dict , ):
        '''simple docstring'''
        super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )

        _A = max_position_embeddings
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_act
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = initializer_range
        _A = type_vocab_size
        _A = layer_norm_eps

        # Character config:
        _A = downsampling_rate
        _A = upsampling_kernel_size
        _A = num_hash_functions
        _A = num_hash_buckets
        _A = local_transformer_stride
79
1
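The listing obfuscates the class name, but its declared `model_type` is `canine`, so through the public `transformers` API the same configuration is used like this (a short usage sketch, not part of the record above):

```python
from transformers import CanineConfig, CanineModel

# Character-level model: note the 16384 max positions and hash-bucket defaults above.
config = CanineConfig(hidden_size=512, num_hidden_layers=6, num_attention_heads=8)
model = CanineModel(config)  # randomly initialized, architecture taken from `config`
print(config.downsampling_rate, config.num_hash_buckets)  # 4 16384 (defaults above)
```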
'''simple docstring''' from __future__ import annotations

import pandas as pd


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> list[int]:
    '''simple docstring'''
    _A = [0] * no_of_processes
    _A = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(__lowercase ):
        _A = burst_time[i]
    _A = 0
    _A = 0
    _A = 9_9999_9999
    _A = 0
    _A = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(__lowercase ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    _A = remaining_time[j]
                    _A = j
                    _A = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        _A = remaining_time[short]
        if minm == 0:
            _A = 9_9999_9999

        if remaining_time[short] == 0:
            complete += 1
            _A = False

            # Find finish time of current process
            _A = increment_time + 1

            # Calculate waiting time
            _A = finish_time - arrival_time[short]
            _A = finar - burst_time[short]

            if waiting_time[short] < 0:
                _A = 0

        # Increment time
        increment_time += 1
    return waiting_time


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> list[int]:
    '''simple docstring'''
    _A = [0] * no_of_processes
    for i in range(__lowercase ):
        _A = burst_time[i] + waiting_time[i]
    return turn_around_time


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> None:
    '''simple docstring'''
    _A = 0
    _A = 0
    for i in range(__lowercase ):
        _A = total_waiting_time + waiting_time[i]
        _A = total_turn_around_time + turn_around_time[i]
    print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
    print("Average turn around time =" , total_turn_around_time / no_of_processes )


if __name__ == "__main__":
    print('''Enter how many process you want to analyze''')
    lowerCamelCase_ = int(input())
    lowerCamelCase_ = [0] * no_of_processes
    lowerCamelCase_ = [0] * no_of_processes
    lowerCamelCase_ = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        lowerCamelCase_ , lowerCamelCase_ = map(int, input().split())

    lowerCamelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    lowerCamelCase_ = burst_time
    lowerCamelCase_ = no_of_processes
    lowerCamelCase_ = waiting_time
    lowerCamelCase_ = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    lowerCamelCase_ = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            '''Process''',
            '''BurstTime''',
            '''ArrivalTime''',
            '''WaitingTime''',
            '''TurnAroundTime''',
        ],
    )

    # Printing the dataFrame
    pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
    print(fcfs)
79
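A worked run of the preemptive shortest-remaining-time-first scheduler above, bypassing the interactive `input()` loop. The listing's definitions are obfuscated (`__lowercase`, `_A`), so this sketch assumes the names from the script's own `__main__` call sites (`calculate_waitingtime`, `calculate_turnaroundtime`) with the assignments restored accordingly:

```python
# Three processes: P3 preempts P2, which preempted P1.
arrival_time = [0, 1, 2]
burst_time = [5, 3, 1]

waiting_time = calculate_waitingtime(arrival_time, burst_time, 3)
turn_around_time = calculate_turnaroundtime(burst_time, 3, waiting_time)
print(waiting_time)      # [4, 1, 0] -- finish times are 9, 5, 3
print(turn_around_time)  # [9, 4, 1] == burst + waiting
```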
'''simple docstring''' class _UpperCAmelCase :
    """simple docstring"""

    def __init__( self : List[str] , __UpperCAmelCase : list[int] ):
        '''simple docstring'''
        _A = len(__UpperCAmelCase )
        _A = [0] * len_array

        if len_array > 0:
            _A = array[0]

            for i in range(1 , __UpperCAmelCase ):
                _A = self.prefix_sum[i - 1] + array[i]

    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ):
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ):
        '''simple docstring'''
        _A = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(__UpperCAmelCase )
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
79
1
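Usage sketch for the prefix-sum class above. The class and method names are obfuscated in the record, so `PrefixSum`, `get_sum`, and `contains_sum` below are assumed conventional names for the constructor and the two methods, in order:

```python
ps = PrefixSum([1, 2, 3, 4])   # internal prefix sums: [1, 3, 6, 10]
print(ps.get_sum(1, 3))        # 2 + 3 + 4 = 9, answered in O(1) after O(n) setup
print(ps.contains_sum(5))      # True: the contiguous subarray [2, 3] sums to 5
print(ps.contains_sum(100))    # False
```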
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , *__UpperCAmelCase : str , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , "vision" ) self.check_model_type(__UpperCAmelCase ) def __call__( self : List[Any] , __UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__UpperCAmelCase : Any ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple , **__UpperCAmelCase : str ): '''simple docstring''' return {}, {}, {} def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = load_image(__UpperCAmelCase ) _A = image.size _A = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = self.model(**__UpperCAmelCase ) return model_outputs def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = model_outputs.predicted_depth _A = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__UpperCAmelCase ) _A = prediction.squeeze().cpu().numpy() _A = (output * 255 / np.max(__UpperCAmelCase )).astype("uint8" ) _A = Image.fromarray(__UpperCAmelCase ) _A = {} _A = predicted_depth _A = depth return output_dict
79
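A short usage sketch of the depth-estimation pipeline defined above, via the high-level `pipeline` factory; the DPT checkpoint name is illustrative, and any model registered for masked depth estimation works:

```python
from transformers import pipeline

depth = pipeline("depth-estimation", model="Intel/dpt-large")  # illustrative checkpoint
out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
out["depth"].save("depth.png")        # PIL image scaled to 0-255, as built in postprocess()
print(out["predicted_depth"].shape)   # the raw interpolated torch tensor
```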
'''simple docstring''' from typing import List import numpy as np def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _A = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[range]: '''simple docstring''' _A = [] for group_idx in range(__lowercase ): _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _A = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def __lowercase ( __lowercase , __lowercase ) -> List[dict]: '''simple docstring''' _A = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def __lowercase ( __lowercase ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowercase ( __lowercase , __lowercase ) -> dict: '''simple docstring''' _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} _A = {} for size in list_sizes: _A = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _A = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): _A = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
79
1
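A worked example of the contiguous shard-distribution logic above: 5 shards over at most 2 jobs split into groups of sizes 3 and 2, because the remainder is handed to the earliest groups. The helper name `_split_gen_kwargs` follows the `datasets` module these utilities come from and is an assumption here, since the record obfuscates the definitions:

```python
gen_kwargs = {"filepaths": ["a", "b", "c", "d", "e"], "split": "train"}

for group in _split_gen_kwargs(gen_kwargs, 2):  # max_num_jobs=2
    print(group["filepaths"], group["split"])
# ['a', 'b', 'c'] train   <- list values are sharded
# ['d', 'e'] train        <- non-list values are copied to every group
```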
'''simple docstring''' def __lowercase ( __lowercase ) -> bool:
    '''simple docstring'''
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
79
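Quick check of the bitwise parity test above (`n & 1` inspects only the lowest bit); the public name `is_even` is an assumption, as the record obfuscates it:

```python
print(is_even(10))  # True:  0b1010 & 1 == 0
print(is_even(7))   # False: 0b0111 & 1 == 1
```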
'''simple docstring''' from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCamelCase_ = {
    '''configuration_jukebox''': [
        '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''JukeboxConfig''',
        '''JukeboxPriorConfig''',
        '''JukeboxVQVAEConfig''',
    ],
    '''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''JukeboxModel''',
        '''JukeboxPreTrainedModel''',
        '''JukeboxVQVAE''',
        '''JukeboxPrior''',
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
1
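The `_LazyModule` registration above defers the heavy torch-dependent imports until a name is first looked up. A minimal illustration, assuming the package is installed as part of `transformers`:

```python
# The attribute lookup in this import is what actually loads configuration_jukebox.
from transformers import JukeboxConfig

config = JukeboxConfig()
print(type(config).__name__)  # JukeboxConfig
```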
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 def __init__( self : str , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : ScoreSdeVeScheduler ): '''simple docstring''' super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) @torch.no_grad() def __call__( self : Any , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 2000 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , **__UpperCAmelCase : str , ): '''simple docstring''' _A = self.unet.config.sample_size _A = (batch_size, 3, img_size, img_size) _A = self.unet _A = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma _A = sample.to(self.device ) self.scheduler.set_timesteps(__UpperCAmelCase ) self.scheduler.set_sigmas(__UpperCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): _A = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): _A = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample _A = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # prediction step _A = model(__UpperCAmelCase , __UpperCAmelCase ).sample _A = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) _A , _A = output.prev_sample, output.prev_sample_mean _A = sample_mean.clamp(0 , 1 ) _A = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__UpperCAmelCase )
79
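A hedged sampling sketch for the predictor-corrector pipeline above. `google/ncsnpp-church-256` is one published `score_sde_ve` checkpoint and is used here only as an illustration:

```python
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")  # illustrative checkpoint
# 2000 steps matches the default num_inference_steps of __call__ above;
# each step runs `correct_steps` corrector updates followed by one predictor update.
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")
```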
'''simple docstring''' import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowerCamelCase_ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' for attribute in key.split("." ): _A = getattr(__lowercase , __lowercase ) if weight_type is not None: _A = getattr(__lowercase , __lowercase ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def __lowercase ( __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' _A = [] _A = fairseq_model.state_dict() _A = hf_model.feature_extractor _A = hf_model.adapter for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( __lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == "group" , ) _A = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(__lowercase , __lowercase , __lowercase , __lowercase ) _A = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _A = True if "*" in mapped_key: _A = name.split(__lowercase )[0].split("." 
)[-2] _A = mapped_key.replace("*" , __lowercase ) if "weight_g" in name: _A = "weight_g" elif "weight_v" in name: _A = "weight_v" elif "bias" in name: _A = "bias" elif "weight" in name: _A = "weight" else: _A = None set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) continue if not is_used: unused_weights.append(__lowercase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' _A = full_name.split("conv_layers." )[-1] _A = name.split("." ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = full_name.split("adaptor." )[-1] _A = name.split("." 
) if items[1].isdigit(): _A = int(items[1] ) else: _A = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' _A = value logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' _A = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' _A = value logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' _A = value logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(__lowercase , __lowercase ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' _A = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' _A = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(__lowercase ) def __lowercase ( __lowercase ) -> Dict: '''simple docstring''' _A , _A = emb.weight.shape _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _A = emb.weight.data return lin_layer @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any: '''simple docstring''' _A = WavaVecaConfig.from_pretrained( __lowercase , add_adapter=__lowercase , adapter_stride=__lowercase , adapter_kernel_size=__lowercase , use_auth_token=__lowercase , output_hidden_size=__lowercase , ) _A = MBartConfig.from_pretrained(__lowercase ) # load model _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) _A = model[0].eval() # load feature extractor _A = WavaVecaFeatureExtractor.from_pretrained(__lowercase , use_auth_token=__lowercase ) # set weights for wav2vec2 encoder _A = WavaVecaModel(__lowercase ) recursively_load_weights_wavaveca(model.encoder , __lowercase ) # load decoder weights _A = MBartForCausalLM(__lowercase ) _A , _A = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__lowercase ) logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) _A = SpeechEncoderDecoderModel(encoder=__lowercase , decoder=__lowercase ) _A = False _A = 
MBartaaTokenizer(__lowercase ) tokenizer.save_pretrained(__lowercase ) _A = hf_wavavec.config.to_dict() _A = tokenizer.pad_token_id _A = tokenizer.bos_token_id _A = tokenizer.eos_token_id _A = "mbart50" _A = "wav2vec2" _A = tokenizer.eos_token_id _A = 25_0004 _A = tokenizer.eos_token_id _A = SpeechEncoderDecoderConfig.from_dict(__lowercase ) hf_wavavec.save_pretrained(__lowercase ) feature_extractor.save_pretrained(__lowercase ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-xls-r-1b''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/mbart-large-50-one-to-many-mmt''', type=str, help='''Path to hf decoder checkpoint config''', ) parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''') parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''') parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''') parser.add_argument('''--encoder_output_dim''', default=10_24, type=int, help='''encoder output dim''') parser.add_argument('''--start_token_id''', default=25_00_04, type=int, help='''`decoder_start_token_id` of model config''') lowerCamelCase_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
79
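The conversion script above is meant to be run via argparse, but it can also be driven programmatically. All local paths below are placeholders, and the function name follows the script's own `__main__` call site (the definitions themselves are obfuscated in the record):

```python
convert_wavaveca_checkpoint(
    "checkpoints/xls_r_mbart50.pt",        # fairseq checkpoint (placeholder path)
    "converted/wav2vec2-xls-r-mbart50",    # output folder for the HF model
    "checkpoints/dict.txt",                # fine-tuned model dict (placeholder)
    "checkpoints/config.yaml",             # fine-tuned model yaml (placeholder)
    encoder_config_path="facebook/wav2vec2-xls-r-1b",
    decoder_config_path="facebook/mbart-large-50-one-to-many-mmt",
    add_adapter=True,
    adapter_kernel_size=3,
    adapter_stride=2,
    decoder_start_token_id=250004,
    encoder_output_dim=1024,
)
```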
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
1
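Likewise, the hybrid-ViT converter above can be called directly with the defaults from its argparse block; the timm weights are downloaded on first use. Arguments are passed positionally, mirroring the `__main__` call site `(vit_name, pytorch_dump_folder_path, push_to_hub)`:

```python
# Convert the default timm hybrid ViT and save it locally without pushing to the Hub.
convert_vit_checkpoint("vit_base_r50_s16_384", "converted/vit-hybrid-base-bit-384", False)
```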
'''simple docstring''' import warnings

from .generation import TFGenerationMixin


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    # warning at import time
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' ,
        snake_case_ ,
    )
79
'''simple docstring''' from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCamelCase_ = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
1
'''simple docstring''' import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

lowerCamelCase_ = 3


def __lowercase ( __lowercase ) -> int:
    '''simple docstring'''
    print("Generating primitive root of p" )
    while True:
        _A = random.randrange(3 , __lowercase )
        if pow(__lowercase , 2 , __lowercase ) == 1:
            continue
        if pow(__lowercase , __lowercase , __lowercase ) == 1:
            continue
        return g


def __lowercase ( __lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''simple docstring'''
    print("Generating prime p..." )
    _A = rabin_miller.generate_large_prime(__lowercase )  # select large prime number.
    _A = primitive_root(__lowercase )  # one primitive root on modulo p.
    _A = random.randrange(3 , __lowercase )  # private_key -> have to be greater than 2 for safety.
    _A = cryptomath.find_mod_inverse(pow(__lowercase , __lowercase , __lowercase ) , __lowercase )

    _A = (key_size, e_a, e_a, p)
    _A = (key_size, d)

    return public_key, private_key


def __lowercase ( __lowercase , __lowercase ) -> None:
    '''simple docstring'''
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print("\nWARNING:" )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    _A , _A = generate_key(__lowercase )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , "w" ) as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )

    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , "w" ) as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''' )


def __lowercase ( ) -> None:
    '''simple docstring'''
    print("Making key files..." )
    make_key_files("elgamal" , 2048 )
    print("Key files generation successful" )


if __name__ == "__main__":
    main()
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowerCamelCase_ = datasets.logging.get_logger(__name__) lowerCamelCase_ = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowerCamelCase_ = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowerCamelCase_ = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ): '''simple docstring''' if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {"src": sources, "mt": predictions, "ref": references} _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )] _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase_ = { '''t5-small''': 5_12, '''t5-base''': 5_12, '''t5-large''': 5_12, '''t5-3b''': 5_12, '''t5-11b''': 5_12, } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = VOCAB_FILES_NAMES snake_case = PRETRAINED_VOCAB_FILES_MAP snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case = ['''input_ids''', '''attention_mask'''] snake_case = TaTokenizer snake_case = [] def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : int="<pad>" , __UpperCAmelCase : Optional[Any]=100 , __UpperCAmelCase : str=None , **__UpperCAmelCase : Tuple , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: _A = [f'''<extra_id_{i}>''' for i in range(__UpperCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens _A = len(set(filter(lambda __UpperCAmelCase : bool("extra_id_" in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' " provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids" " tokens" ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) _A = vocab_file _A = False if not self.vocab_file else True _A = extra_ids @staticmethod def lowerCAmelCase ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: _A = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , __UpperCAmelCase , ) return max_model_length def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) logger.info(f'''Copy vocab file to {out_vocab_file}''' ) return (out_vocab_file,) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ): '''simple docstring''' _A = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: _A = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ): '''simple docstring''' _A = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return list( set(filter(lambda __UpperCAmelCase : bool(re.search(R"<extra_id_\d+>" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' return [self.convert_tokens_to_ids(__UpperCAmelCase ) for token in self.get_sentinel_tokens()]
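# --- Added usage sketch (assumes a cached or downloadable t5-small checkpoint): the
# extra_ids handled above surface as <extra_id_0> ... <extra_id_99> sentinel tokens,
# which T5 uses for span corruption. T5TokenizerFast is the public name of this class.
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tok.convert_ids_to_tokens(ids))  # each sentinel maps to one reserved id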
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sort `sequence[start:end + 1]` in place with the (deliberately slow) slowsort algorithm.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
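# --- Added usage sketch: slowsort mutates the list in place, optionally over a
# [start, end] window only.
def _demo_slowsort() -> None:
    data = [5, 2, 9, 1, 5, 6]
    slowsort(data)
    assert data == [1, 2, 5, 5, 6, 9]

    partial = [3, 1, 2, 9, 8, 7]
    slowsort(partial, start=0, end=2)  # only the first three elements are ordered
    assert partial[:3] == [1, 2, 3]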
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCamelCase_ = logging.get_logger(__name__) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = ['''audio_values''', '''audio_mask'''] def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[Any]=2048 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Optional[Any]=[16, 16] , __UpperCAmelCase : int=128 , __UpperCAmelCase : Union[str, Any]=44100 , __UpperCAmelCase : Optional[int]=86 , __UpperCAmelCase : List[Any]=2048 , __UpperCAmelCase : Dict=0.0 , **__UpperCAmelCase : List[str] , ): '''simple docstring''' super().__init__( feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase , ) _A = spectrogram_length _A = num_channels _A = patch_size _A = feature_size // self.patch_size[1] _A = n_fft _A = sampling_rate // hop_length_to_sampling_rate _A = sampling_rate _A = padding_value _A = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__UpperCAmelCase , norm="slaney" , mel_scale="slaney" , ).T def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : np.array ): '''simple docstring''' _A = spectrogram( __UpperCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , ) _A = log_spec[:, :-1] _A = log_spec - 20.0 _A = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : List[Any] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , **__UpperCAmelCase : Dict , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _A = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ): _A = np.asarray(__UpperCAmelCase , dtype=np.floataa ) elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _A = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __UpperCAmelCase ): _A = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _A = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _A = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _A = np.array(__UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding _A = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _A = np.ones([len(__UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _A = padded_audio_features * self.padding_value for i in range(len(__UpperCAmelCase ) ): _A = audio_features[i] _A = feature # return as BatchFeature if return_attention_mask: _A = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: _A = {"audio_values": padded_audio_features} _A = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase ) return encoded_inputs
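# --- Added usage sketch (assumption: the public TvltFeatureExtractor matches the
# defaults in this file): one second of mono 44.1 kHz audio in, padded log-mel
# patches plus a patch-level attention mask out.
import numpy as np
from transformers import TvltFeatureExtractor

fe = TvltFeatureExtractor()
wave = np.random.randn(44100).astype(np.float32)
out = fe(wave, sampling_rate=44100, return_tensors="np", return_attention_mask=True)
print(out["audio_values"].shape, out["audio_mask"].shape)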
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
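# --- Added usage sketch mirroring the integration test above (assumes TensorFlow and
# access to the google/pegasus-xsum checkpoint); the beam setting is copied from the test.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tok(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    return_tensors="tf",
    padding=True,
)
ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tok.batch_decode(ids.numpy(), skip_special_tokens=True))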
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple 
docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) 
self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
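# --- Added illustrative sketch (standalone NumPy, an assumption rather than the exact
# TFRoFormerSelfAttention code): the rotary identity exercised by the tests above, i.e.
# even/odd feature pairs rotated by position-dependent angles.
import numpy as np


def apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x_even * cos - x_odd * sin
    out[..., 1::2] = x_even * sin + x_odd * cos
    return out


positions = np.arange(4)[:, None]                     # (seq_len, 1)
inv_freq = 1.0 / (10000 ** (np.arange(0, 8, 2) / 8))  # (dim/2,)
angles = positions * inv_freq                         # (seq_len, dim/2)
q = np.random.randn(4, 8)
print(apply_rotary(q, np.sin(angles), np.cos(angles)).shape)  # (4, 8)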
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _UpperCAmelCase : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : int=32 * 8 , __UpperCAmelCase : int=32 * 8 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Optional[int]=64 , ): '''simple docstring''' _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = hidden_dim _A = hidden_dim def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _A = self.num_queries _A = self.num_labels _A = [1, 1, 1, 1] _A = self.num_channels _A = 64 _A = 128 _A = self.hidden_dim _A = self.hidden_dim _A = self.hidden_dim return config def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A , _A , _A , _A , _A = self.prepare_config_and_inputs() _A = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) , config.decoder_layers ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]=False ): '''simple docstring''' with torch.no_grad(): _A = MaskaFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _A = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase ) _A = model(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, 
self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = MaskaFormerForUniversalSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase : Any ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase ) _A = model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) _A = model( pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case = False snake_case = False snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = MaskaFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : str ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="Mask2Former does not use inputs_embeds" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="Mask2Former is not a generative model" ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason="Mask2Former does not use token embeddings" ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @require_torch_multi_gpu 
@unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _A = MaskaFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = (self.model_tester.min_size,) * 2 _A = { "pixel_values": torch.randn((2, 3, *size) , device=__UpperCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__UpperCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__UpperCAmelCase ).long(), } _A = self.model_tester.get_config() _A = MaskaFormerForUniversalSegmentation(__UpperCAmelCase ).to(__UpperCAmelCase ) _A = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) _A = model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return _A = self.all_model_classes[1] _A , _A , _A , _A , _A = self.model_tester.prepare_config_and_inputs() _A = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _A = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ).loss loss.backward() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.all_model_classes[1] _A , _A , _A , _A , _A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) model.train() _A = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCamelCase_ = 1e-4 def __lowercase ( ) -> Optional[int]: 
'''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) _A = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): _A = model(**__UpperCAmelCase ) _A = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) _A = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) _A = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ).eval() _A = self.default_image_processor _A = prepare_img() _A = image_processor(__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) _A = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): _A = model(**__UpperCAmelCase ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) _A = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] _A = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ).eval() _A = self.default_image_processor _A = image_processor( [np.zeros((3, 
800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) _A = inputs["pixel_values"].to(__UpperCAmelCase ) _A = [el.to(__UpperCAmelCase ) for el in inputs["mask_labels"]] _A = [el.to(__UpperCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): _A = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
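# --- Added usage sketch (assumes torch and the public
# facebook/mask2former-swin-small-coco-instance checkpoint): a minimal forward pass
# producing the shapes the tests above assert.
import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
inputs = processor(Image.new("RGB", (384, 384)), return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)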
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
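# --- Added usage sketch (assumes a transformers release that exposes GPTNeoXConfig
# with rope_scaling support): rope_scaling must be a two-field dict, which is exactly
# what the validation above enforces.
from transformers import GPTNeoXConfig

cfg = GPTNeoXConfig(hidden_size=512, num_attention_heads=8, rope_scaling={"type": "linear", "factor": 2.0})
print(cfg.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # unknown scaling types are rejected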
79
1
"""Convert a fairseq Speech2Text checkpoint into the transformers format."""

import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
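# Hedged invocation sketch (script filename and paths are illustrative, not from
# the original):
# python convert_s2t_fairseq_to_tfms.py \
#     --fairseq_path /path/to/s2t_checkpoint.pt \
#     --pytorch_dump_folder_path ./s2t_converted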
79
"""Change the contrast of an image with PIL."""

from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of `img` with its contrast adjusted by `level`."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
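# Worked example of the factor formula above (values are illustrative): at
# level=170, factor = 259 * (170 + 255) / (255 * (259 - 170)) ≈ 4.85, so a
# pixel value of 150 maps to int(128 + 4.85 * (150 - 128)) = 234 — values are
# pushed away from the mid-gray point 128 in both directions.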
79
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = (DEISMultistepScheduler,) snake_case = (('''num_inference_steps''', 25),) def lowerCAmelCase ( self : Any , **__UpperCAmelCase : Dict ): '''simple docstring''' _A = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, } config.update(**__UpperCAmelCase ) return config def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int=0 , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = dict(self.forward_default_kwargs ) _A = kwargs.pop("num_inference_steps" , __UpperCAmelCase ) _A = self.dummy_sample _A = 0.1 * sample _A = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _A = self.get_scheduler_config(**__UpperCAmelCase ) _A = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals _A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) _A = scheduler_class.from_pretrained(__UpperCAmelCase ) new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals _A = dummy_past_residuals[: new_scheduler.config.solver_order] _A , _A = sample, sample for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ): _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase ( self : str ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Tuple=0 , **__UpperCAmelCase : List[Any] ): '''simple docstring''' _A = dict(self.forward_default_kwargs ) _A = kwargs.pop("num_inference_steps" , __UpperCAmelCase ) _A = self.dummy_sample _A = 0.1 * sample _A = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _A = self.get_scheduler_config() _A = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) _A = scheduler_class.from_pretrained(__UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) _A = dummy_past_residuals[: new_scheduler.config.solver_order] _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Tuple ): '''simple docstring''' if scheduler is None: _A = self.scheduler_classes[0] _A = self.get_scheduler_config(**__UpperCAmelCase ) _A = 
scheduler_class(**__UpperCAmelCase ) _A = self.scheduler_classes[0] _A = self.get_scheduler_config(**__UpperCAmelCase ) _A = scheduler_class(**__UpperCAmelCase ) _A = 10 _A = self.dummy_model() _A = self.dummy_sample_deter scheduler.set_timesteps(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = model(__UpperCAmelCase , __UpperCAmelCase ) _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample return sample def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = dict(self.forward_default_kwargs ) _A = kwargs.pop("num_inference_steps" , __UpperCAmelCase ) for scheduler_class in self.scheduler_classes: _A = self.get_scheduler_config() _A = scheduler_class(**__UpperCAmelCase ) _A = self.dummy_sample _A = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps" ): _A = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _A = [residual + 0.2, residual + 0.15, residual + 0.10] _A = dummy_past_residuals[: scheduler.config.solver_order] _A = scheduler.timesteps[5] _A = scheduler.timesteps[6] _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = DEISMultistepScheduler(**self.get_scheduler_config() ) _A = self.full_loop(scheduler=__UpperCAmelCase ) _A = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.23916 ) < 1E-3 _A = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _A = DPMSolverMultistepScheduler.from_config(scheduler.config ) _A = UniPCMultistepScheduler.from_config(scheduler.config ) _A = DEISMultistepScheduler.from_config(scheduler.config ) _A = self.full_loop(scheduler=__UpperCAmelCase ) _A = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.23916 ) < 1E-3 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.check_over_configs(thresholding=__UpperCAmelCase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , algorithm_type="deis" , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , algorithm_type=__UpperCAmelCase , ) _A = self.full_loop( solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , 
algorithm_type=__UpperCAmelCase , ) assert not torch.isnan(__UpperCAmelCase ).any(), "Samples have nan numbers" def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.check_over_configs(lower_order_final=__UpperCAmelCase ) self.check_over_configs(lower_order_final=__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0 ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.full_loop() _A = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.23916 ) < 1E-3 def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.full_loop(prediction_type="v_prediction" ) _A = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.scheduler_classes[0] _A = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0 ) _A = scheduler_class(**__UpperCAmelCase ) _A = 10 _A = self.dummy_model() _A = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = model(__UpperCAmelCase , __UpperCAmelCase ) _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample assert sample.dtype == torch.floataa
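# Hedged sketch of the scheduler-swapping pattern the tests above rely on: any
# of the multistep schedulers can be rebuilt from another's config, which is how
# the DEIS <-> DPMSolver <-> UniPC round-trip is exercised.
from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1000, beta_schedule="linear")
unipc = UniPCMultistepScheduler.from_config(deis.config)  # same noise schedule, different solver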
79
"""Compute numbers in Sylvester's sequence."""


def sylvester(number: int) -> int:
    """Return the n-th number in Sylvester's sequence: S(1) = 2, S(n) = S(n-1)**2 - S(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
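# Sanity check (a sketch, not in the original module): Sylvester's sequence
# satisfies S(n) = S(n-1)**2 - S(n-1) + 1 with S(1) = 2, i.e. 2, 3, 7, 43, 1807, ...
assert sylvester(2) == 3
assert sylvester(4) == 43
assert sylvester(5) == 1807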
79
1
"""Deprecated feature extractor class for LayoutLMv2."""

import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
79
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCamelCase_ = logging.getLogger(__name__) def __lowercase ( __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' if os.path.exists(__lowercase ): if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile( os.path.join(__lowercase , "config.json" ) ): os.remove(os.path.join(__lowercase , "config.json" ) ) if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(__lowercase , "pytorch_model.bin" ) ): os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) ) else: os.makedirs(__lowercase ) model.save_pretrained(__lowercase ) def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]: '''simple docstring''' _A = 2 if unlogit: _A = torch.pow(__lowercase , __lowercase ) _A = p * torch.log(__lowercase ) _A = 0 return -plogp.sum(dim=-1 ) def __lowercase ( __lowercase ) -> Optional[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) ) for row in range(len(__lowercase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int: '''simple docstring''' _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) if head_mask is None: _A = torch.ones(__lowercase , __lowercase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowercase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _A = None _A = 0.0 _A = 0.0 for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): _A = tuple(t.to(args.device ) for t in inputs ) ((_A) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _A , _A , _A = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowercase ): _A = entropy(attn.detach() , __lowercase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _A = 2 _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _A = (head_importance - head_importance.min()) 
/ (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(__lowercase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(__lowercase ) logger.info("Head ranked by importance scores" ) _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _A = torch.arange( head_importance.numel() , device=args.device ) _A = head_ranks.view_as(__lowercase ) print_ad_tensor(__lowercase ) return attn_entropy, head_importance, total_loss def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase ) _A = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold ) _A = torch.ones_like(__lowercase ) _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _A = original_score while current_score >= original_score * args.masking_threshold: _A = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _A = float("Inf" ) _A = head_importance.view(-1 ).sort()[1] if len(__lowercase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads _A = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) _A = new_head_mask.view(-1 ) _A = 0.0 _A = new_head_mask.view_as(__lowercase ) _A = new_head_mask.clone().detach() print_ad_tensor(__lowercase ) # Compute metric and head importance again _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase ) _A = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(__lowercase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase ) _A = 1 / loss _A = datetime.now() - before_time _A = sum(p.numel() for p in model.parameters() ) _A = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowercase , __lowercase ): _A = [ v, ] assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowercase ) _A = sum(p.numel() for p in model.parameters() ) _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , ) _A = 1 / loss _A = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase ) logger.info("Pruning: speed 
ratio (original timing / new timing): %f percents" , original_time / new_time * 100 ) save_model(__lowercase , args.output_dir ) def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=__lowercase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." ) parser.add_argument("--seed" , type=__lowercase , default=42 ) parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." 
) _A = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) _A = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _A = torch.device("cuda" , args.local_rank ) _A = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _A = nn.parallel.DistributedDataParallel( __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase ) elif args.n_gpu > 1: _A = nn.DataParallel(__lowercase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowercase ) torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , __lowercase ) # Prepare dataset _A = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _A = (torch.from_numpy(__lowercase ),) _A = TensorDataset(*__lowercase ) _A = RandomSampler(__lowercase ) _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowercase , __lowercase , __lowercase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _A = mask_heads(__lowercase , __lowercase , __lowercase ) prune_heads(__lowercase , __lowercase , __lowercase , __lowercase ) if __name__ == "__main__": main()
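# Hedged invocation sketch (script name and data path are illustrative; the
# flags come from the argument parser above and trigger the mask-then-prune flow):
# python run_gpt2_head_pruning.py \
#     --model_name_or_path gpt2 \
#     --data_dir ./token_ids.txt \
#     --output_dir ./pruned_gpt2 \
#     --try_masking --masking_threshold 0.9 --masking_amount 0.1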
79
1
"""Surface area and volume of a regular dodecahedron."""


def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7 * sqrt(5)) / 4 * edge**3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
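# Quick numerical check (sketch, not part of the original module): for edge = 1
# the closed forms above give
#   dodecahedron_surface_area(1) == 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457
#   dodecahedron_volume(1)       == (15 + 7 * sqrt(5)) / 4      ≈ 7.6631
assert abs(dodecahedron_surface_area(1) - 20.6457) < 1e-3
assert abs(dodecahedron_volume(1) - 7.6631) < 1e-3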
79
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = 
self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A 
= "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
79
1
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = ['''image_processor''', '''tokenizer'''] snake_case = '''OwlViTImageProcessor''' snake_case = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : List[Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Tuple ): '''simple docstring''' _A = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) _A = kwargs.pop("feature_extractor" ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self : Optional[Any] , __UpperCAmelCase : Any=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Any="max_length" , __UpperCAmelCase : List[Any]="np" , **__UpperCAmelCase : str ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )): _A = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ): _A = [] # Maximum number of queries across batch _A = max([len(__UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCAmelCase ) != max_num_queries: _A = t + [" "] * (max_num_queries - len(__UpperCAmelCase )) _A = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) encodings.append(__UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _A = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _A = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _A = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _A = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _A = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _A = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _A = BatchEncoding() _A = input_ids _A = attention_mask if 
query_images is not None: _A = BatchEncoding() _A = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values _A = query_pixel_values if images is not None: _A = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _A = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Dict ): '''simple docstring''' return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : int , *__UpperCAmelCase : int , **__UpperCAmelCase : int ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Dict , *__UpperCAmelCase : str , **__UpperCAmelCase : Optional[Any] ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : str , *__UpperCAmelCase : Any , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : int , *__UpperCAmelCase : Any , **__UpperCAmelCase : int ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , ) return self.image_processor_class @property def lowerCAmelCase ( self : int ): '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , ) return self.image_processor
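# Hedged usage sketch: the processor above pads every sample in a batch to the
# same number of text queries before stacking input_ids/attention_mask; the
# checkpoint id is illustrative.
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))
inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape)  # one row per text query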
79
"""Longformer import structure."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
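# Hedged sketch of what the _LazyModule wiring above provides: importing the
# package stays cheap because each attribute resolves its submodule only on
# first access.
import transformers

config_cls = transformers.LongformerConfig  # modeling code is loaded only here
print(config_cls.model_type)  # "longformer"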
79
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = StableUnCLIPImgaImgPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case = frozenset([] ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = 32 _A = embedder_hidden_size # image encoding components _A = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) _A = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__UpperCAmelCase , projection_dim=__UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) _A = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase ) _A = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) _A = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _A = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCAmelCase , layers_per_block=1 , upcast_attention=__UpperCAmelCase , use_linear_projection=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , ) torch.manual_seed(0 ) _A = AutoencoderKL() _A = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=0 , __UpperCAmelCase : str=True ): '''simple docstring''' if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) if pil_image: _A = input_image * 0.5 + 0.5 _A = input_image.clamp(0 , 1 ) _A = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() _A = DiffusionPipeline.numpy_to_pil(__UpperCAmelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowerCAmelCase ( self : int ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = StableUnCLIPImgaImgPipeline(**__UpperCAmelCase ) _A = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) inputs.update({"image_embeds": None} ) _A = sd_pipe(**__UpperCAmelCase ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCAmelCase ( self : str ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__UpperCAmelCase ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) _A = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _A = torch.Generator(device="cpu" ).manual_seed(0 ) _A = pipe(__UpperCAmelCase , "anime turle" , generator=__UpperCAmelCase , output_type="np" ) _A = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) _A = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) _A = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _A = torch.Generator(device="cpu" ).manual_seed(0 ) _A = pipe(__UpperCAmelCase , "anime turle" , generator=__UpperCAmelCase , output_type="np" ) _A = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _A = pipe( __UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) _A = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
79
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def 
lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration class for UperNet semantic segmentation models."""

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
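# Illustrative usage sketch for the config above (an assumption, not part of the
# source file): it presumes the transformers package is installed and exports
# UperNetConfig. With no backbone_config, a default ResNet backbone is used.
#
#     from transformers import UperNetConfig
#
#     config = UperNetConfig()
#     print(config.backbone_config.model_type)  # "resnet"
#     print(config.pool_scales)                 # [1, 2, 3, 6]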
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two collections (sets, lists, or tuples).

    With ``alternative_union=True`` the denominator is ``len(set_a) + len(set_b)``
    instead of the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of shards, i.e. the common length of all list values in ``gen_kwargs``."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split ``gen_kwargs`` into at most ``max_num_jobs`` dicts, sharding every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
    return [
        {
            key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value, list)
            else value
            for key, value in gen_kwargs.items()
        }
        for group_idx in range(len(shard_indices_per_group))
    ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of ``_split_gen_kwargs``: concatenate the list values of several gen_kwargs dicts."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of ``gen_kwargs`` whose list values are shuffled consistently per length."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
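# A minimal usage sketch for the sharding helpers above (the file names are made
# up for illustration): list values in gen_kwargs are treated as shards and
# distributed over jobs, while non-list values are copied into every job.
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt", "e.txt"], "split": "train"}
    for job_kwargs in _split_gen_kwargs(gen_kwargs, max_num_jobs=2):
        print(job_kwargs)
    # {'files': ['a.txt', 'b.txt', 'c.txt'], 'split': 'train'}
    # {'files': ['d.txt', 'e.txt'], 'split': 'train'}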
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring''' import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _UpperCAmelCase : """simple docstring""" @staticmethod def lowerCAmelCase ( *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : List[Any] ): '''simple docstring''' pass def __lowercase ( __lowercase ) -> Optional[Any]: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. lowerCamelCase_ = ( '''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png''' ) @is_pipeline_test @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def lowerCAmelCase ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = pipeline( "document-question-answering" , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) _A = INVOICE_URL _A = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , "" ) ) ) _A = "What is the placebo?" _A = [ { "image": load_image(__UpperCAmelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = dqa_pipeline(__UpperCAmelCase , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"score": ANY(__UpperCAmelCase ), "answer": ANY(__UpperCAmelCase ), "start": ANY(__UpperCAmelCase ), "end": ANY(__UpperCAmelCase )}, {"score": ANY(__UpperCAmelCase ), "answer": ANY(__UpperCAmelCase ), "start": ANY(__UpperCAmelCase ), "end": ANY(__UpperCAmelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) _A = INVOICE_URL _A = "How many cats are there?" _A = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) _A = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably _A = "./tests/fixtures/tests_samples/COCO/000000039769.png" _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) # We can optionnally pass directly the words and bounding boxes _A = "./tests/fixtures/tests_samples/COCO/000000039769.png" _A = [] _A = [] _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) _A = INVOICE_URL _A = "What is the invoice number?" _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ] , ) _A = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ] , ) _A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) _A = INVOICE_URL _A = "What is the invoice number?" _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] , ) _A = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] , ) _A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__UpperCAmelCase ) _A = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__UpperCAmelCase , revision="3dc6de3" , ) _A = INVOICE_URL _A = "What is the invoice number?" 
_A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) _A = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) _A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) _A = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , "" ) ) ) # This model should also work if `image` is set to None _A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def lowerCAmelCase ( self : int ): '''simple docstring''' _A = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__UpperCAmelCase ) _A = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__UpperCAmelCase , revision="3dc6de3" , max_seq_len=50 , ) _A = INVOICE_URL _A = "What is the invoice number?" _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] , ) _A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) _A = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , "" ) ) ) # This model should also work if `image` is set to None _A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' _A = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) _A = INVOICE_URL _A = "What is the invoice number?" _A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first ``n`` natural numbers, using the closed forms
    sum(i^2) = n(n+1)(2n+1)/6 and (sum i)^2 = (n(n+1)/2)^2."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
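# Sanity-check sketch for the closed forms used above (illustrative only, not
# part of the original file): the brute-force sums must agree with solution(n).
if __name__ == "__main__":
    n = 10
    brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
    assert brute == solution(n) == 2640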
def min_path_sum(grid: list) -> int:
    """Minimum-cost path from the top-left to the bottom-right of ``grid``,
    moving only right or down; the grid is updated in place."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate minimal path costs into ``current_row`` using the row above."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
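# A small worked example for min_path_sum (grid values chosen for illustration):
# the cheapest monotone path through this grid is 1 -> 3 -> 1 -> 1 -> 1 = 7.
if __name__ == "__main__":
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7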
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , 
train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration class for CANINE character-level models."""

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
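# Illustrative usage sketch for the config above (an assumption, not part of the
# source file): it presumes the transformers package is installed and exports
# CanineConfig. CANINE is tokenizer-free, so the character-hashing parameters
# are the interesting knobs.
#
#     from transformers import CanineConfig
#
#     config = CanineConfig(num_hash_buckets=16384)
#     print(config.downsampling_rate, config.local_transformer_stride)  # 4 128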
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: lowerCamelCase_ = None try: import msvcrt except ImportError: lowerCamelCase_ = None try: import fcntl except ImportError: lowerCamelCase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowerCamelCase_ = OSError # Data # ------------------------------------------------ lowerCamelCase_ = [ '''Timeout''', '''BaseFileLock''', '''WindowsFileLock''', '''UnixFileLock''', '''SoftFileLock''', '''FileLock''', ] lowerCamelCase_ = '''3.0.12''' lowerCamelCase_ = None def __lowercase ( ) -> Optional[int]: '''simple docstring''' global _logger _A = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict ): '''simple docstring''' _A = lock_file return None def __str__( self : Tuple ): '''simple docstring''' _A = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class _UpperCAmelCase : """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : Any ): '''simple docstring''' _A = lock return None def __enter__( self : Union[str, Any] ): '''simple docstring''' return self.lock def __exit__( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]=-1 , __UpperCAmelCase : Any=None ): '''simple docstring''' _A = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long _A = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. _A = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. _A = None # The default timeout value. _A = timeout # We use this lock primarily for the lock counter. _A = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. _A = 0 return None @property def lowerCAmelCase ( self : str ): '''simple docstring''' return self._lock_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self._timeout @timeout.setter def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Tuple ): '''simple docstring''' _A = float(__UpperCAmelCase ) return None def lowerCAmelCase ( self : List[str] ): '''simple docstring''' raise NotImplementedError() def lowerCAmelCase ( self : Dict ): '''simple docstring''' raise NotImplementedError() @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self._lock_file_fd is not None def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Tuple=0.05 ): '''simple docstring''' if timeout is None: _A = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 _A = id(self ) _A = self._lock_file _A = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: _A = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: _A = id(self ) _A = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() _A = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : Optional[Any] ): '''simple docstring''' self.acquire() return self def __exit__( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' self.release() return None def __del__( self : int ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int ): '''simple docstring''' _A = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: _A = os.path.dirname(__UpperCAmelCase ) _A = str(hash(__UpperCAmelCase ) ) _A = filename[: max_length - len(__UpperCAmelCase ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[Any]=-1 , __UpperCAmelCase : Dict=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) _A = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: _A = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: _A = fd return None def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = self._lock_file_fd _A = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Any=-1 , __UpperCAmelCase : List[str]=None ): '''simple docstring''' _A = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = os.O_RDWR | os.O_CREAT | os.O_TRUNC _A = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: _A = fd return None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = self._lock_file_fd _A = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: _A = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: _A = fd return None def lowerCAmelCase ( self : Dict ): '''simple docstring''' os.close(self._lock_file_fd ) _A = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowerCamelCase_ = None if msvcrt: lowerCamelCase_ = WindowsFileLock elif fcntl: lowerCamelCase_ = UnixFileLock else: lowerCamelCase_ = SoftFileLock if warnings is not None: warnings.warn('''only soft file lock is available''')
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of ``array[start..end]`` (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
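# Quick illustration of the prefix-sum class above (sample values made up):
# range queries are O(1) after the O(n) precomputation.
if __name__ == "__main__":
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1, 3))      # 2 + 3 + 4 = 9
    print(ps.contains_sum(7))    # True: 3 + 4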
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first state-space search with pruning: a branch is cut as soon as its
    partial sum overshoots ``max_sum`` or can no longer reach it."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''char''' snake_case = '''bpe''' snake_case = '''wp''' lowerCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = ['''image_processor''', '''char_tokenizer'''] snake_case = '''ViTImageProcessor''' snake_case = '''MgpstrTokenizer''' def __init__( self : List[str] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : str ): '''simple docstring''' _A = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) _A = kwargs.pop("feature_extractor" ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _A = tokenizer _A = AutoTokenizer.from_pretrained("gpt2" ) _A = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self : int , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : List[Any] ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: _A = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: _A = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _A = encodings["input_ids"] return inputs def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A , _A , _A = sequences _A = char_preds.size(0 ) _A , _A = self._decode_helper(__UpperCAmelCase , "char" ) _A , _A = self._decode_helper(__UpperCAmelCase , "bpe" ) _A , _A = self._decode_helper(__UpperCAmelCase , "wp" ) _A = [] _A = [] for i in range(__UpperCAmelCase ): _A = [char_scores[i], bpe_scores[i], wp_scores[i]] _A = [char_strs[i], bpe_strs[i], wp_strs[i]] _A = scores.index(max(__UpperCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _A = {} _A = final_strs _A = final_scores _A = char_strs _A = bpe_strs _A = wp_strs return out def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] ): '''simple docstring''' if format == DecodeType.CHARACTER: _A = self.char_decode _A = 1 _A = "[s]" elif format == DecodeType.BPE: _A = self.bpe_decode _A = 2 _A = "#" elif format == DecodeType.WORDPIECE: _A = self.wp_decode _A = 102 _A = "[SEP]" else: raise ValueError(f'''Format {format} is not supported.''' ) _A , _A = [], [] _A = pred_logits.size(0 ) _A = pred_logits.size(1 ) _A , _A = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase ) _A = preds_index.view(-1 , __UpperCAmelCase )[:, 1:] _A = decoder(__UpperCAmelCase ) _A , _A = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 
).max(dim=2 ) _A = preds_max_prob[:, 1:] for index in range(__UpperCAmelCase ): _A = preds_str[index].find(__UpperCAmelCase ) _A = preds_str[index][:pred_eos] _A = preds_index[index].cpu().tolist() _A = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1 _A = preds_max_prob[index][: pred_eos_index + 1] _A = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__UpperCAmelCase ) conf_scores.append(__UpperCAmelCase ) return dec_strs, conf_scores def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs
"""Lazy import structure for the Jukebox model (configuration, tokenizer, and torch modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
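# The module above only registers names with transformers' lazy-import
# machinery. A tiny standalone sketch of the same deferred-import idea
# (illustrative only; this is not the actual _LazyModule implementation):
import importlib


class LazyAttr:
    """Defer importing a module until the attribute is first resolved."""

    def __init__(self, module_name: str, attr: str) -> None:
        self._module_name = module_name
        self._attr = attr
        self._obj = None

    def resolve(self):
        if self._obj is None:
            # The import happens here, on first use, not at startup.
            module = importlib.import_module(self._module_name)
            self._obj = getattr(module, self._attr)
        return self._obj


dumps = LazyAttr("json", "dumps")  # "json" stands in for a heavy submodule
print(dumps.resolve()({"lazy": True}))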
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = (IPNDMScheduler,) snake_case = (('''num_inference_steps''', 50),) def lowerCAmelCase ( self : int , **__UpperCAmelCase : Dict ): '''simple docstring''' _A = {"num_train_timesteps": 1000} config.update(**__UpperCAmelCase ) return config def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str]=0 , **__UpperCAmelCase : List[Any] ): '''simple docstring''' _A = dict(self.forward_default_kwargs ) _A = kwargs.pop("num_inference_steps" , __UpperCAmelCase ) _A = self.dummy_sample _A = 0.1 * sample _A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _A = self.get_scheduler_config(**__UpperCAmelCase ) _A = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals _A = dummy_past_residuals[:] if time_step is None: _A = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) _A = scheduler_class.from_pretrained(__UpperCAmelCase ) new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals _A = dummy_past_residuals[:] _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any]=0 , **__UpperCAmelCase : List[Any] ): '''simple docstring''' _A = dict(self.forward_default_kwargs ) _A = kwargs.pop("num_inference_steps" , __UpperCAmelCase ) _A = self.dummy_sample _A = 0.1 * sample _A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _A = self.get_scheduler_config() _A = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _A = dummy_past_residuals[:] if time_step is None: _A = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) _A = scheduler_class.from_pretrained(__UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) _A = dummy_past_residuals[:] _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = 
new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase ( self : str , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.scheduler_classes[0] _A = self.get_scheduler_config(**__UpperCAmelCase ) _A = scheduler_class(**__UpperCAmelCase ) _A = 10 _A = self.dummy_model() _A = self.dummy_sample_deter scheduler.set_timesteps(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = model(__UpperCAmelCase , __UpperCAmelCase ) _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): _A = model(__UpperCAmelCase , __UpperCAmelCase ) _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample return sample def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = dict(self.forward_default_kwargs ) _A = kwargs.pop("num_inference_steps" , __UpperCAmelCase ) for scheduler_class in self.scheduler_classes: _A = self.get_scheduler_config() _A = scheduler_class(**__UpperCAmelCase ) _A = self.dummy_sample _A = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps" ): _A = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _A = dummy_past_residuals[:] _A = scheduler.timesteps[5] _A = scheduler.timesteps[6] _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample _A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase , time_step=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.full_loop() _A = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 2540529 ) < 10
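# A minimal end-to-end sketch of the scheduler exercised by the tests above.
# Hedged: toy tensors and a constant stand-in residual, not the test fixtures.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # stand-in for a denoising model's prediction
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)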
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ): '''simple docstring''' super().__init__() _A = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ) else: _A = None _A = torch.nn.Parameter(__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1 # get prompt text embeddings _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _A = text_input_ids[:, : self.tokenizer.model_max_length] _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _A = self.learned_classifier_free_sampling_embeddings.embeddings _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 ) else: _A = [""] * batch_size _A = text_input_ids.shape[-1] _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = negative_prompt_embeds.shape[1] _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 ) _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = len(__UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' ) _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _A = (batch_size, self.transformer.num_latent_pixels) if latents is None: _A = self.transformer.num_vector_embeds - 1 _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0," f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps.to(self.device ) _A = latents for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the sample if we are doing classifier free guidance _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample if do_classifier_free_guidance: _A , _A = model_output.chunk(2 ) _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase ) _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase ) # remove `log(0)`'s (`-inf`s) _A = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = self.vqvae.config.vq_embed_dim _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase ) _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ): '''simple docstring''' _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase ) _A = torch.exp(__UpperCAmelCase ) _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase ) _A = torch.cat((all_true, keep_mask) , dim=1 ) _A = keep_mask[:, :-1, :] _A = keep_mask.gather(1 , indices.argsort(1 ) ) _A = log_p_x_0.clone() _A = -torch.inf # -inf = log(0) return rv
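# A minimal inference sketch for the pipeline above. Hedged: assumes the public
# "microsoft/vq-diffusion-ithq" checkpoint; the arguments mirror __call__ above.
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

image = pipe(
    "teddy bear playing in the pool",
    num_inference_steps=100,
    guidance_scale=5.0,
    truncation_rate=1.0,
).images[0]
image.save("vq_diffusion_sample.png")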
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = [ '''small''', '''small-base''', '''medium''', '''medium-base''', '''intermediate''', '''intermediate-base''', '''large''', '''large-base''', '''xlarge''', '''xlarge-base''', ] lowerCamelCase_ = { '''vocab_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''', '''funnel-transformer/small-base''': ( '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''', '''funnel-transformer/large-base''': ( '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json''' ), }, } lowerCamelCase_ = {F"""funnel-transformer/{name}""": 5_12 for name in _model_names} lowerCamelCase_ = {F"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names} class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = VOCAB_FILES_NAMES snake_case = PRETRAINED_VOCAB_FILES_MAP snake_case = PRETRAINED_INIT_CONFIGURATION snake_case = FunnelTokenizer snake_case = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case = 2 def __init__( self : int , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : int="<sep>" , __UpperCAmelCase : Union[str, Any]="<pad>" , __UpperCAmelCase : List[Any]="<cls>" , __UpperCAmelCase : Optional[int]="<mask>" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : int=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[int]="##" , **__UpperCAmelCase : Any , ): '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , clean_text=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , wordpieces_prefix=__UpperCAmelCase , **__UpperCAmelCase , ) _A = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars ): _A = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) ) _A = do_lower_case _A = strip_accents _A = tokenize_chinese_chars _A = normalizer_class(**__UpperCAmelCase ) _A = do_lower_case def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any=None ): '''simple docstring''' _A = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ): '''simple docstring''' _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ): '''simple docstring''' _A = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
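# A short usage sketch for the fast tokenizer above. Hedged: assumes the public
# "funnel-transformer/small" checkpoint listed in the vocab map above.
from transformers import FunnelTokenizerFast

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tokenizer("Hello world", "Second segment")
print(enc["input_ids"])
print(enc["token_type_ids"])  # begins with 2 for <cls>, per cls_token_type_id above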
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
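# Example invocation of the conversion script above. Hedged: the file name is
# assumed; the timm model name is the script's own default and the output
# directory is hypothetical.
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384 \
#       --push_to_hub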
"""Evaluate a postfix (reverse Polish notation) expression of integers."""
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncated toward zero (C-style "/").
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
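# Worked examples (my own illustrations, assuming the function above):
# the postfix form of (2 + 1) * 3 is ["2", "1", "+", "3", "*"].
print(evaluate_postfix(["2", "1", "+", "3", "*"]))   # 9
print(evaluate_postfix(["4", "13", "5", "/", "+"]))  # 6, since 13/5 truncates to 2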
"""Lazy import structure for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Find a root of a function with the Newton-Raphson method, using sympy for the derivative."""
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Return a root of ``func`` (an expression in ``x``) near the initial guess ``a``."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This tolerance dictates the accuracy of the answer.
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Root of a trigonometric function: the root of sin(x) near 2 is pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Root of a polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # The root of log(x) - 1 = 0 is e
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential root
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowerCamelCase_ = datasets.logging.get_logger(__name__) lowerCamelCase_ = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowerCamelCase_ = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowerCamelCase_ = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ): '''simple docstring''' if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {"src": sources, "mt": predictions, "ref": references} _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )] _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''perceiver''' def __init__( self : Any , __UpperCAmelCase : Optional[Any]=256 , __UpperCAmelCase : str=1280 , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : Union[str, Any]=1 , __UpperCAmelCase : List[Any]=26 , __UpperCAmelCase : int=8 , __UpperCAmelCase : Union[str, Any]=8 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Optional[Any]="kv" , __UpperCAmelCase : int=1 , __UpperCAmelCase : int=1 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : Optional[Any]=1E-12 , __UpperCAmelCase : str=True , __UpperCAmelCase : Dict=262 , __UpperCAmelCase : List[str]=2048 , __UpperCAmelCase : List[Any]=56 , __UpperCAmelCase : Dict=[368, 496] , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : Optional[Any]=1920 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : Any=[1, 16, 224, 224] , **__UpperCAmelCase : Optional[int] , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) _A = num_latents _A = d_latents _A = d_model _A = num_blocks _A = num_self_attends_per_block _A = num_self_attention_heads _A = num_cross_attention_heads _A = qk_channels _A = v_channels _A = cross_attention_shape_for_attention _A = self_attention_widening_factor _A = cross_attention_widening_factor _A = hidden_act _A = attention_probs_dropout_prob _A = initializer_range _A = layer_norm_eps _A = use_query_residual # masked language modeling attributes _A = vocab_size _A = max_position_embeddings # image classification attributes _A = image_size # flow attributes _A = train_size # multimodal autoencoding attributes _A = num_frames _A = audio_samples_per_frame _A = samples_per_patch _A = output_shape class _UpperCAmelCase ( snake_case_ ): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": _A = {0: "batch", 1: "choice", 2: "sequence"} else: _A = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return 1E-4 def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 40 , __UpperCAmelCase : int = 40 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A = compute_effective_axis_dimension( __UpperCAmelCase , 
fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _A = preprocessor.num_special_tokens_to_add(__UpperCAmelCase ) _A = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _A = [" ".join(["a"] ) * seq_length] * batch_size _A = dict(preprocessor(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) _A = inputs.pop("input_ids" ) return inputs elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A = compute_effective_axis_dimension(__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch ) _A = self._generate_dummy_images(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = dict(preprocessor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) _A = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
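# A quick sketch instantiating the config above. Hedged: uses the upstream
# class name PerceiverConfig from transformers rather than the obfuscated
# class name in this file.
from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=128, d_latents=640)
print(config.model_type)                     # "perceiver"
print(config.num_latents, config.d_latents)  # 128 640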
"""Slowsort: a deliberately inefficient in-place sorting algorithm."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
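# Usage example (my own): slowsort sorts the list in place.
data = [9, 3, 7, 1, 4]
slowsort(data)
print(data)  # [1, 3, 4, 7, 9]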
"""Project Euler 71: the numerator of the fraction immediately to the left of
numerator/denominator when proper fractions are listed by ascending value for
all denominators up to ``limit``."""


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # Exclude numerator/denominator itself; we want the fraction strictly left of it.
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
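# Worked check (my own, assuming the function above): with denominators up to 8,
# the fraction immediately to the left of 3/7 is 2/5, so the numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2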
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
"""Project Euler 6: difference between the square of the sum and the sum of the
squares of the first ``n`` natural numbers."""


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
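# Worked check (my own): for n = 10 the sum of squares is 385 and the square of
# the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 2640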
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple 
docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) 
self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
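# Reference sketch of the rotary rotation checked in the last test: each
# (even, odd) feature pair (x1, x2) is rotated by its position's sinusoidal
# angle, i.e. x1' = x1*cos - x2*sin and x2' = x2*cos + x1*sin. Illustrative
# NumPy only, not the library implementation.
if __name__ == "__main__":
    import numpy as np

    def apply_rotary(x, sin, cos):
        x1, x2 = x[..., 0::2], x[..., 1::2]
        out = np.empty_like(x)
        out[..., 0::2] = x1 * cos - x2 * sin
        out[..., 1::2] = x2 * cos + x1 * sin
        return out

    features = np.arange(8.0).reshape(1, 8)
    angles = np.full(4, 0.5)
    print(apply_rotary(features, np.sin(angles), np.cos(angles)))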
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCamelCase_ = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' lowerCamelCase_ = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' lowerCamelCase_ = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[List[List[str]]] , __UpperCAmelCase : List[List[str]] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__UpperCAmelCase , hypotheses=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase ) }
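# The metric above delegates to NLTK; a minimal sketch calling NLTK directly
# (same 1-4 n-gram defaults) for a single hypothesis/reference pair:
if __name__ == "__main__":
    from nltk.translate.gleu_score import corpus_gleu, sentence_gleu

    hypothesis = "the cat sat on the mat".split()
    reference = "the cat is sitting on the mat".split()
    print(sentence_gleu([reference], hypothesis))    # sentence-level score
    print(corpus_gleu([[reference]], [hypothesis]))  # corpus-level aggregate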
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
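# A minimal sketch of the rope_scaling contract enforced above: the dict must
# carry a "type" in {"linear", "dynamic"} and a float "factor" > 1.0 (assumes
# the packaged GPTNeoXConfig is importable from transformers).
if __name__ == "__main__":
    from transformers import GPTNeoXConfig

    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}
    # rope_scaling={"type": "ntk", "factor": 2.0} would raise ValueError above.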
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
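# Example invocation; the script name and both paths are placeholders for
# whatever this file and the local checkpoint are actually called:
#
#   python convert_xglm_checkpoint.py /path/to/xglm/model.pt ./xglm-hf-dump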
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by mapping every channel value c to
    128 + factor * (c - 128), where the factor grows with the level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
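# Worked example of the factor above: at level=170 the multiplier is
# 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85, so a pixel
# value of 150 maps to int(128 + 4.85 * (150 - 128)) = 234; PIL's point()
# clamps results to the valid 0..255 range for 8-bit images.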
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=7 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Any=30 , __UpperCAmelCase : List[Any]=400 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , __UpperCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , __UpperCAmelCase : int=True , __UpperCAmelCase : Optional[int]=1 / 255 , __UpperCAmelCase : Tuple=True , ): '''simple docstring''' _A = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int]=False ): '''simple docstring''' if not batched: _A = image_inputs[0] if isinstance(__UpperCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size["shortest_edge"] * h / w ) _A = self.size["shortest_edge"] elif w > h: _A = self.size["shortest_edge"] _A = int(self.size["shortest_edge"] * w / h ) else: _A = self.size["shortest_edge"] _A = self.size["shortest_edge"] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0] _A = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = DetaImageProcessingTester(self ) @property def lowerCAmelCase ( self : Any ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_rescale" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_pad" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "size" ) ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) _A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase ( self : str ): '''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: _A = json.loads(f.read() ) 
_A = {"image_id": 39769, "annotations": target} # encode them _A = DetaImageProcessor() _A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors="pt" ) # verify pixel values _A = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) ) # verify orig_size _A = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) ) # verify size _A = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) ) @slow def lowerCAmelCase ( self : int ): '''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: _A = json.loads(f.read() ) _A = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} _A = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them _A = DetaImageProcessor(format="coco_panoptic" ) _A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors="pt" ) # verify pixel values _A = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) ) # verify masks _A = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCAmelCase ) # verify orig_size _A = 
torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) ) # verify size _A = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) )
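# A minimal usage sketch of the preprocessing exercised by the slow tests above
# (resize, rescale, normalize, pad); assumes torch, the vision extras, and the
# COCO fixture image are available, so it is guarded.
if __name__ == "__main__":
    from PIL import Image
    from transformers import DetaImageProcessor

    processor = DetaImageProcessor()
    img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    enc = processor(images=img, return_tensors="pt")
    print(enc["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) as asserted above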
def sylvester(number: int) -> int:
    """Return the number-th term of Sylvester's sequence, defined by
    a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
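# The recursion above implements a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1; an
# iterative variant (a sketch) sidesteps Python's recursion limit for larger n:
def sylvester_iterative(number: int) -> int:
    num = 2
    for _ in range(number - 1):
        num = num * num - num + 1
    return num


if __name__ == "__main__":
    assert sylvester_iterative(8) == sylvester(8)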