code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    Example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.

    Args:
        power: exponent applied to 2 (defaults to 1000).

    Returns:
        The digit sum of 2**power.
    """
    # str() yields the decimal digits; the original looped over the digits but
    # summed int(<whole number string>) each iteration, which was wrong.
    return sum(int(digit) for digit in str(2**power))


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
620
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download and return the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name: str) -> torch.Tensor:
    """Return the first five reference logits for the given SwiftFormer variant."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Build (old_key, new_key) pairs mapping original checkpoint keys to HF names."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # numbered entries are stages of blocks; others are downsampling layers
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint into HF format, verify it, and save it.

    Args:
        swiftformer_name: one of swiftformer_xs / swiftformer_s / swiftformer_l1 / swiftformer_l3.
        pytorch_dump_folder_path: output directory for the converted model.
        original_ckpt: URL or local path of the original checkpoint.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            # check_hash=True guards against a corrupted download; TODO confirm the
            # original conversion used hash checking.
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
1
def solution(n: int = 100) -> int:
    """Return (sum of 1..n)**2 minus (sum of squares of 1..n) — Project Euler 6.

    Uses the closed forms sum(i) = n(n+1)/2 and sum(i**2) = n(n+1)(2n+1)/6.
    Both divisions are exact, so integer floor division is used to avoid
    floating-point rounding for large n (the original used `/` and `int()`).

    Args:
        n: upper bound of the range (defaults to 100).

    Returns:
        The difference, always a non-negative integer.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
620
def triangle_number_generator():
    """Yield the triangular numbers T(n) = n(n+1)/2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of n via prime factorisation.

    For n = p1**a1 * p2**a2 * ... the divisor count is (a1+1)(a2+1)...
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # the remaining n is a prime factor with multiplicity 1
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangular number with more than 500 divisors (PE 12).

    The original passed an undefined name into count_divisors; the generator
    variable is the intended argument.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
620
1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1_024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) MVP tokenizer, a byte-level BPE tokenizer.

    Mirrors the slow `MvpTokenizer`; `add_prefix_space` and `trim_offsets`
    are propagated into the serialized backend pre-tokenizer/post-processor.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if it was serialized with a
        # different `add_prefix_space` than requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word: include the space
        # before it (lstrip) but not after. TODO confirm lstrip/rstrip flags
        # against the slow tokenizer's behaviour.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with <s> ... </s> (pair: <s> A </s> </s> B </s>)."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type-id mask (MVP does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
620
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for the JSON loader."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads JSON / JSON-lines files into tables."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config knobs and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/extract data files and map them to split generators."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a raw table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                # renamed from `type` to avoid shadowing the builtin
                field_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=field_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs for each file, chunked for JSON-lines input."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")

                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."""
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
620
1
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    """Builds tiny BeiT configs and inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=None,
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # avoid a mutable default argument; [0, 1, 2, 3] was the original default
        self.out_indices = out_indices if out_indices is not None else [0, 1, 2, 3]
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # the [CLS] token is dropped from the masked-modeling logits
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model/pipeline test-suite hookup for BeiT."""

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        # reference values differ slightly because Pillow changed its resampling in 9.0
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
620
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs and inputs for the standalone-decoder tests below."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for one test batch."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that decoding with the KV cache matches recomputing from scratch."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        # avoid pad tokens so the cache path and the full pass see the same inputs
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented for a standalone decoder
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip('The model doesn\'t support left padding')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
620
1
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    """argparse `set_defaults(func=...)` factory for the plain `env` command."""
    return EnvironmentCommand()


def download_command_factory(args):
    """Factory that forwards the optional accelerate config file from the CLI args."""
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    """`transformers-cli env`: print the environment information needed for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `env` subcommand on the root CLI parser."""
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '--accelerate-config_file',
            default=None,
            help='The accelerate config file to use for the default values in the launching script.',
        )
        # the later set_defaults wins: the command always goes through the args-aware factory
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        # default of None keeps the no-argument factory above working
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        """Collect version/GPU info for each optional backend and print it; returns the info dict."""
        safetensors_version = 'not installed'
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('safetensors') is not None:
            import safetensors

            safetensors_version = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""

        accelerate_version = 'not installed'
        accelerate_config = accelerate_config_str = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                '\n'.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"""\t{accelerate_config}"""
            )

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU'))

        flax_version = 'not installed'
        jax_version = 'not installed'
        jaxlib_version = 'not installed'
        jax_backend = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            '`transformers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'Huggingface_hub version': huggingface_hub.__version__,
            'Safetensors version': f"""{safetensors_version}""",
            'Accelerate version': f"""{accelerate_version}""",
            'Accelerate config': f"""{accelerate_config_str}""",
            'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
            'Tensorflow version (GPU?)': f"""{tf_version} ({tf_cuda_available})""",
            'Flax version (CPU?/GPU?/TPU?)': f"""{flax_version} ({jax_backend})""",
            'Jax version': f"""{jax_version}""",
            'JaxLib version': f"""{jaxlib_version}""",
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render the info dict as a markdown-style bullet list."""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
620
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`; `latents` holds the (not yet quantized) encoder output."""

    # encoded latents, as produced by the encoder + quant conv
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE: an encoder, a vector-quantization bottleneck, and a decoder."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder; double_z=False because VQ latents are not a Gaussian
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode `x` into latents; returns `VQEncoderOutput` (or a tuple if `return_dict=False`)."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents `h` back to image space, optionally skipping the quantizer."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # spatial norm decoders additionally condition on the quantized latents
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self, sample: torch.FloatTensor, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Encode then decode `sample`; returns the reconstruction."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
620
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public names; grown below as optional backends are detected.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so the heavy submodules import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """
    Replace the model's `nn.Linear` layers with bitsandbytes 8-bit/4-bit layers and, for a
    meta-device model, load the checkpoint from `weights_location` and dispatch it on `device_map`.

    Returns the quantized model. Raises ImportError/ValueError if the installed `bitsandbytes`
    does not support the requested precision, and RuntimeError when no GPU is available or when
    `weights_location` is required but missing.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.'
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.'
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.'
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """Resolve/validate a device map for the quantized model; raises if quantized modules land on cpu/disk in 4-bit."""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.'
            )

        # skipped and fp32-kept modules must not be sized with the quantized dtype
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                        these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                        `load_and_quantize_model`. Check\n                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                        for more details.\n                        '
                    )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit'
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every eligible `nn.Linear` in `model` with a bitsandbytes layer; warns if nothing was replaced."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.'
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    """Recursive worker for `replace_with_bnb_layers`; returns (model, has_been_replaced)."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    """Return module names (tied weights + output head) that should stay in their original dtype."""
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Return True if `model` contains at least one bitsandbytes 4-bit linear layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    """Return the device of the module's first parameter."""
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize `param` on GPU (when no fp16 statistics are given), offload it to disk, and leave a meta placeholder."""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace('weight', 'SCB'),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)

    # keep a meta-device placeholder of the right size in the model
    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
1
import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCamelCase__ : Optional[Any] = "http://www.mocksite.com/file1.txt" UpperCamelCase__ : int = "\"text\": [\"foo\", \"foo\"]" UpperCamelCase__ : int = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8" class __snake_case : __lowerCAmelCase : Dict = 200 __lowerCAmelCase : Tuple = {'Content-Length': '100'} __lowerCAmelCase : Optional[Any] = {} def lowerCAmelCase__ ( self , **_A): return [bytes(_A , 'utf-8')] def _UpperCAmelCase ( *_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" return MockResponse() @pytest.mark.parametrize('urls_type' , [str, list, dict] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" import requests monkeypatch.setattr(_SCREAMING_SNAKE_CASE , 'request' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = URL if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = url elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [url] elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = {'train': url} SCREAMING_SNAKE_CASE_ = 'dummy' SCREAMING_SNAKE_CASE_ = 'downloads' SCREAMING_SNAKE_CASE_ = tmp_path SCREAMING_SNAKE_CASE_ = DownloadConfig( cache_dir=os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , use_etag=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = DownloadManager(dataset_name=_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = dl_manager.download(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = urls for downloaded_paths in [downloaded_paths]: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE 
): SCREAMING_SNAKE_CASE_ = [downloaded_paths] SCREAMING_SNAKE_CASE_ = [urls] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert "train" in downloaded_paths.keys() SCREAMING_SNAKE_CASE_ = downloaded_paths.values() SCREAMING_SNAKE_CASE_ = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert downloaded_path == dl_manager.downloaded_paths[input_url] SCREAMING_SNAKE_CASE_ = Path(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() SCREAMING_SNAKE_CASE_ = downloaded_path.read_text() assert content == CONTENT SCREAMING_SNAKE_CASE_ = downloaded_path.with_suffix('.json' ) assert metadata_downloaded_path.exists() SCREAMING_SNAKE_CASE_ = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('paths_type' , [str, list, dict] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = str(_SCREAMING_SNAKE_CASE ) if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = filename elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [filename] elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = {'train': filename} SCREAMING_SNAKE_CASE_ = 'dummy' SCREAMING_SNAKE_CASE_ = xz_file.parent SCREAMING_SNAKE_CASE_ = 'extracted' SCREAMING_SNAKE_CASE_ = DownloadConfig( cache_dir=_SCREAMING_SNAKE_CASE , use_etag=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = DownloadManager(dataset_name=_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = dl_manager.extract(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = paths for extracted_paths in [extracted_paths]: if 
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [extracted_paths] SCREAMING_SNAKE_CASE_ = [paths] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert "train" in extracted_paths.keys() SCREAMING_SNAKE_CASE_ = extracted_paths.values() SCREAMING_SNAKE_CASE_ = paths.values() assert extracted_paths for extracted_path, input_path in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert extracted_path == dl_manager.extracted_paths[input_path] SCREAMING_SNAKE_CASE_ = Path(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = extracted_path.parts assert parts[-1] == hash_url_to_filename(_SCREAMING_SNAKE_CASE , etag=_SCREAMING_SNAKE_CASE ) assert parts[-2] == extracted_subdir assert extracted_path.exists() SCREAMING_SNAKE_CASE_ = extracted_path.read_text() SCREAMING_SNAKE_CASE_ = text_file.read_text() assert extracted_file_content == expected_file_content def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" assert path.endswith('.jsonl' ) for num_items, line in enumerate(_SCREAMING_SNAKE_CASE , start=1 ): SCREAMING_SNAKE_CASE_ = json.loads(line.decode('utf-8' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = request.getfixturevalue(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ): _test_jsonl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert num_jsonl == 2 @pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
request.getfixturevalue(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ): _test_jsonl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert num_tar == 1 assert num_jsonl == 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) , start=1 ): assert os.path.basename(_SCREAMING_SNAKE_CASE ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
620
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } UpperCamelCase__ : str = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ = bs[:] SCREAMING_SNAKE_CASE_ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ = char return pairs class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ = bytes_to_unicode() SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='utf-8') as merges_handle: SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1] SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A)))) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def lowerCAmelCase__ ( self): return len(self.encoder) def lowerCAmelCase__ ( self): return dict(self.encoder , **self.added_tokens_encoder) def lowerCAmelCase__ ( self , _A): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = get_pairs(_A) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf'))) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 while i < len(_A): try: SCREAMING_SNAKE_CASE_ = word.index(_A , _A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) SCREAMING_SNAKE_CASE_ = j if word[i] == first and i < len(_A) - 1 and word[i + 1] == second: 
new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = new_word if len(_A) == 1: break else: SCREAMING_SNAKE_CASE_ = get_pairs(_A) SCREAMING_SNAKE_CASE_ = ' '.join(_A) SCREAMING_SNAKE_CASE_ = word return word def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for token in re.findall(self.pat , _A): SCREAMING_SNAKE_CASE_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' ')) return bpe_tokens def lowerCAmelCase__ ( self , _A): return self.encoder.get(_A , self.encoder.get(self.unk_token)) def lowerCAmelCase__ ( self , _A): return self.decoder.get(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n') SCREAMING_SNAKE_CASE_ = 0 with open(_A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!') SCREAMING_SNAKE_CASE_ = token_index writer.write(' '.join(_A) + 
'\n') index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCAmelCase__ ( self , _A , _A=False , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ = ' ' + text return (text, kwargs)
620
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : int = logging.get_logger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if "resnet-50" in model_name: SCREAMING_SNAKE_CASE_ = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: SCREAMING_SNAKE_CASE_ = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) SCREAMING_SNAKE_CASE_ = DetrConfig(use_timm_backbone=_SCREAMING_SNAKE_CASE , backbone_config=_SCREAMING_SNAKE_CASE ) # set label attributes SCREAMING_SNAKE_CASE_ = 'panoptic' in model_name if is_panoptic: SCREAMING_SNAKE_CASE_ = 250 else: SCREAMING_SNAKE_CASE_ = 91 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'coco-detection-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) 
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( 
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") ) # 
decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads 
rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ = '' if is_panoptic: SCREAMING_SNAKE_CASE_ = 'detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[:256] SCREAMING_SNAKE_CASE_ = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[256:512] SCREAMING_SNAKE_CASE_ = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[:256] SCREAMING_SNAKE_CASE_ = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[256:512] SCREAMING_SNAKE_CASE_ = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention SCREAMING_SNAKE_CASE_ = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[:256, :] SCREAMING_SNAKE_CASE_ = 
in_proj_bias_cross_attn[:256] SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[256:512, :] SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[256:512] SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[-256:, :] SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[-256:] def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Any=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_detr_config(_SCREAMING_SNAKE_CASE ) # load original model from torch hub SCREAMING_SNAKE_CASE_ = { 'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101', } logger.info(f"""Converting model {model_name}...""" ) SCREAMING_SNAKE_CASE_ = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(_SCREAMING_SNAKE_CASE ): if is_panoptic: SCREAMING_SNAKE_CASE_ = 'detr.' + src rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(_SCREAMING_SNAKE_CASE , is_panoptic=_SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE_ = 'detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE_ = DetrForSegmentation(_SCREAMING_SNAKE_CASE ) if is_panoptic else DetrForObjectDetection(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() # verify our conversion on an image SCREAMING_SNAKE_CASE_ = 'coco_panoptic' if is_panoptic else 'coco_detection' SCREAMING_SNAKE_CASE_ = DetrImageProcessor(format=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = processor(images=prepare_img() , return_tensors='pt' ) SCREAMING_SNAKE_CASE_ = encoding['pixel_values'] SCREAMING_SNAKE_CASE_ = detr(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(f"""nielsr/{model_name}""" ) processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": UpperCamelCase__ : Dict = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") UpperCamelCase__ : str = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
620
from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase__ = logging.get_logger(__name__)

# NOTE(review): this rebinds `UpperCamelCase__`, shadowing the logger created
# just above (upstream uses two distinct names, `logger` and the archive map).
# The binding order is kept to avoid breaking unseen references to this name.
UpperCamelCase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class __snake_case(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    Carries a BERT-style hyper-parameter set plus ``projection_dim`` for the
    optional projection applied on top of the pooled output.

    Fixes applied in review:
    - every ``__init__`` parameter was named ``_A`` (duplicate parameter names
      are a SyntaxError); the canonical DPR parameter names are restored from
      the assignment order in the body,
    - the body bound each value to a throwaway local instead of ``self``, so
      the config stored nothing,
    - the base class referenced the undefined name ``lowerCAmelCase__``; the
      imported ``PretrainedConfig`` is the intended base.
    """

    # Model-type tag used by the auto-config machinery.
    __lowerCAmelCase = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        # Forward only the padding id and remaining kwargs to the base config.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
620
1
# NOTE(review): the original module repeated dozens of byte-identical dummy
# definitions, every one bound to the same two names (`_UpperCAmelCase` for
# module-level placeholders, `__snake_case` for class placeholders, and
# `lowerCAmelCase__` for the two identical classmethods).  Python executes a
# module top to bottom, so only the final binding of each name is observable;
# the earlier copies were dead code and are removed here, leaving the module
# namespace unchanged.  Every signature also reused one identifier for both
# *args and **kwargs, which is a SyntaxError -- the parameters are renamed.
from ..utils import DummyObject, requires_backends


def _UpperCAmelCase(*args, **kwargs):
    """Module-level placeholder: raise a helpful missing-backend error
    instead of failing at import time when torch is not installed."""
    # NOTE(review): the original forwarded the positional-args tuple to
    # requires_backends (upstream dummies pass the function object itself);
    # behavior kept as written -- confirm against upstream if error text matters.
    requires_backends(args, ["torch"])


class __snake_case(metaclass=DummyObject):
    """Class placeholder: any instantiation or factory-style classmethod call
    raises a missing-backend error for the `torch` backend.

    The original metaclass referenced the undefined name `lowerCAmelCase__`;
    the imported `DummyObject` is the intended metaclass.
    """

    # Backends this placeholder stands in for.
    __lowerCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def lowerCAmelCase__(cls, *args, **kwargs):
        """Factory-style placeholder (the original defined this classmethod
        twice with identical bodies; only the last binding survived)."""
        requires_backends(cls, ["torch"])
620
import pytest

import datasets

# Import fixture modules as plugins
UpperCamelCase__ = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]

# NOTE(review): every def below rebinds the same name `_UpperCAmelCase`, so
# only the last one is importable; upstream names these pytest hooks/fixtures
# individually (pytest_collection_modifyitems, pytest_configure, ...).  The
# names are kept as-is to preserve the module's public surface; the fixes
# below only repair outright errors (duplicate parameter names were a
# SyntaxError, and several values referenced undefined module-level names).


def _UpperCAmelCase(config, items):
    """Mark every collected test without an `integration`/`unit` marker as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def _UpperCAmelCase(config):  # noqa: F811 -- rebinds the name above, as in the original
    """Register the custom `torchaudio_latest` marker with pytest."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def _UpperCAmelCase(tmp_path_factory, monkeypatch):  # noqa: F811
    """Redirect every datasets cache directory into a per-session temp dir
    so tests never touch (or pollute) the user's real HF cache."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def _UpperCAmelCase():  # noqa: F811
    """Disable tqdm progress bars for the whole test session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def _UpperCAmelCase(monkeypatch):  # noqa: F811
    """Never report download counts from the test suite."""
    # NOTE(review): the original passed an undefined name as the value;
    # upstream datasets sets this flag to False -- confirm against upstream.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def _UpperCAmelCase(monkeypatch):  # noqa: F811
    """Silence SQLAlchemy's 2.0 'uber' deprecation warning."""
    # NOTE(review): the original passed an undefined name as the value;
    # upstream sets this to True -- confirm against upstream.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
1
def combination_util(arr, n, r, index, data, i):
    """Recursively print all size-`r` combinations of arr[0..n-1].

    Args:
        arr: source sequence.
        n: number of usable elements in `arr`.
        r: combination size.
        index: next free slot in `data` to fill.
        data: scratch list of length `r` holding the current partial combination.
        i: index of the next candidate element in `arr`.

    NOTE(review): the original defined this under the obfuscated name
    `_UpperCAmelCase` while every call site (including its own recursion)
    used `combination_util`, a guaranteed NameError; the name required by
    the call sites is restored.  The write into `data[index]` and the
    `range(r)` bound were likewise lost in obfuscation and are restored.
    """
    if index == r:
        # A full combination sits in data[0..r-1]; print it.
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with next
    # (Note that i+1 is passed, but index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all combinations of size `r` from the first `n` items of `arr`."""
    # Temporary array 'data[]' reused across the recursion.
    data = [0] * r
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    # (the original referenced an undefined name `arr` here).
    UpperCamelCase__ = [10, 20, 30, 40, 50]
    print_combination(UpperCamelCase__, len(UpperCamelCase__), 3)
    # This code is contributed by Ambuj sahu
700
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

UpperCamelCase__ = logging.get_logger(__name__)

# NOTE(review): rebinds `UpperCamelCase__`, shadowing the logger above; kept
# as-is to preserve the module's name bindings.
UpperCamelCase__ = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class __snake_case(PretrainedConfig):
    """Configuration for RoFormer (rotary position embedding) models.

    Fixes applied in review: duplicate ``_A`` parameter names (a SyntaxError)
    replaced with the canonical names implied by the assignment order; values
    stored on ``self`` instead of a throwaway local; the undefined base name
    ``_snake_case`` replaced with the imported ``PretrainedConfig``.

    NOTE(review): the second class below rebinds ``__snake_case``, shadowing
    this config class at module level -- upstream uses two distinct names.
    """

    # Model-type tag used by the auto-config machinery.
    __lowerCAmelCase = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Embedding width defaults to the hidden width when not given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class __snake_case(OnnxConfig):  # noqa: F811 -- rebinds the name, as in the original
    """ONNX export configuration: declares the dynamic axes of the inputs."""

    @property
    def lowerCAmelCase__(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): this unconditional reassignment makes the
        # multiple-choice branch above dead; it mirrors the original token
        # sequence -- confirm against upstream before removing.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase__ = logging.get_logger(__name__)

# NOTE(review): rebinds `UpperCamelCase__`, shadowing the logger above; kept
# as-is to preserve the module's name bindings.
UpperCamelCase__ = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class __snake_case(PretrainedConfig):
    """Configuration for BioGPT causal language models.

    Fixes applied in review: duplicate ``_A`` parameter names (a SyntaxError)
    replaced with the canonical names implied by the assignment order; values
    stored on ``self`` instead of a throwaway local; the undefined base name
    ``lowerCAmelCase__`` replaced with the imported ``PretrainedConfig``.
    """

    # Model-type tag used by the auto-config machinery.
    __lowerCAmelCase = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Special-token ids are consumed by the base config, not stored here.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
620
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = '▁' UpperCamelCase__ : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} UpperCamelCase__ : Any = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } UpperCamelCase__ : Any = {'vinai/bartpho-syllable': 1_024} class __snake_case ( snake_case__ ): __lowerCAmelCase : Dict = VOCAB_FILES_NAMES __lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Tuple = ["""input_ids""", """attention_mask"""] def __init__( self , _A , _A , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A = None , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE_ = vocab_file SCREAMING_SNAKE_CASE_ = monolingual_vocab_file SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(UpperCAmelCase_)) # Load the reduced vocab # Keep order of special tokens for backward compatibility 
SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCAmelCase_) not in self.fairseq_tokens_to_ids: SCREAMING_SNAKE_CASE_ = cnt cnt += 1 with open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f: for line in f.readlines(): SCREAMING_SNAKE_CASE_ = line.strip().split()[0] SCREAMING_SNAKE_CASE_ = len(self.fairseq_tokens_to_ids) if str(UpperCAmelCase_) not in self.fairseq_tokens_to_ids: SCREAMING_SNAKE_CASE_ = len(self.fairseq_tokens_to_ids) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self): SCREAMING_SNAKE_CASE_ = self.__dict__.copy() SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto() return state def __setstate__( self , _A): SCREAMING_SNAKE_CASE_ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_)) + [1] return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep) * [0] @property def lowerCAmelCase__ ( self): return len(self.fairseq_ids_to_tokens) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def lowerCAmelCase__ ( self , _A): return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_) def lowerCAmelCase__ ( self , _A): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCAmelCase__ ( self , _A): return self.fairseq_ids_to_tokens[index] def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(UpperCAmelCase_).replace(UpperCAmelCase_ , ' ').strip() return out_string def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(UpperCAmelCase_): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , ) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCAmelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCAmelCase_ , 'wb') as fi: SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( UpperCAmelCase_) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file , UpperCAmelCase_) elif not os.path.isfile(self.monolingual_vocab_file): with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"""{str(UpperCAmelCase_)} \n""") return 
out_vocab_file, out_monolingual_vocab_file
702
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __snake_case ( lowerCAmelCase__ ): def __init__( self , _A , _A , _A , _A = None , ): super().__init__() self.register_modules(transformer=_A , vae=_A , scheduler=_A) # create a imagenet -> id dictionary for easier use SCREAMING_SNAKE_CASE_ = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(','): SCREAMING_SNAKE_CASE_ = int(_A) SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items())) def lowerCAmelCase__ ( self , _A): if not isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = list(_A) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""") return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ): SCREAMING_SNAKE_CASE_ = len(_A) SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels SCREAMING_SNAKE_CASE_ = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , ) SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1) SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device) SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_A) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2] SCREAMING_SNAKE_CASE_ = 
torch.cat([half, half] , dim=0) SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A) SCREAMING_SNAKE_CASE_ = t if not torch.is_tensor(_A): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps' if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa else: SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device) elif len(timesteps.shape) == 0: SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output SCREAMING_SNAKE_CASE_ = self.transformer( _A , timestep=_A , class_labels=_A).sample # perform guidance if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0) SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps) SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0) SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1) else: SCREAMING_SNAKE_CASE_ = noise_pred # compute previous image: x_t -> x_t-1 SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0) else: SCREAMING_SNAKE_CASE_ = latent_model_input SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents SCREAMING_SNAKE_CASE_ = 
self.vae.decode(_A).sample SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A) if not return_dict: return (samples,) return ImagePipelineOutput(images=_A)
620
0
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
703
import pickle import numpy as np from matplotlib import pyplot as plt class __snake_case : def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2): SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = conva_get[:2] SCREAMING_SNAKE_CASE_ = conva_get[2] SCREAMING_SNAKE_CASE_ = size_pa SCREAMING_SNAKE_CASE_ = rate_w SCREAMING_SNAKE_CASE_ = rate_t SCREAMING_SNAKE_CASE_ = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 def lowerCAmelCase__ ( self , _A): # save model dict with pickle SCREAMING_SNAKE_CASE_ = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(_A , 'wb') as f: pickle.dump(_A , _A) print(f"""Model saved: {save_path}""") @classmethod def lowerCAmelCase__ ( cls , _A): # read saved model with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = pickle.load(_A) # noqa: S301 SCREAMING_SNAKE_CASE_ = model_dic.get('conv1') conv_get.append(model_dic.get('step_conv1')) SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre') # 
create model instance SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A) # modify model parameter SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('wkj') SCREAMING_SNAKE_CASE_ = model_dic.get('vji') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3') return conv_ins def lowerCAmelCase__ ( self , _A): return 1 / (1 + np.exp(-1 * x)) def lowerCAmelCase__ ( self , _A): return round(_A , 3) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): # convolution process SCREAMING_SNAKE_CASE_ = convs[0] SCREAMING_SNAKE_CASE_ = convs[1] SCREAMING_SNAKE_CASE_ = np.shape(_A)[0] # get the data slice of original image data, data_focus SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , size_data - size_conv + 1 , _A): for j_focus in range(0 , size_data - size_conv + 1 , _A): SCREAMING_SNAKE_CASE_ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_A) # calculate the feature map of every single kernel, and saved as list of matrix SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1) for i_map in range(_A): SCREAMING_SNAKE_CASE_ = [] for i_focus in range(len(_A)): SCREAMING_SNAKE_CASE_ = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape( _A , _A) data_featuremap.append(_A) # expanding the data slice to One dimenssion SCREAMING_SNAKE_CASE_ = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_A)) SCREAMING_SNAKE_CASE_ = np.asarray(_A) return focus_list, data_featuremap def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"): # pooling process SCREAMING_SNAKE_CASE_ = len(featuremaps[0]) SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling) SCREAMING_SNAKE_CASE_ = [] for i_map in range(len(_A)): SCREAMING_SNAKE_CASE_ = 
featuremaps[i_map] SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , _A , _A): for j_focus in range(0 , _A , _A): SCREAMING_SNAKE_CASE_ = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_A)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A) featuremap_pooled.append(_A) return featuremap_pooled def lowerCAmelCase__ ( self , _A): # expanding three dimension data to one dimension list SCREAMING_SNAKE_CASE_ = [] for i in range(len(_A)): SCREAMING_SNAKE_CASE_ = np.shape(data[i]) SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1]) SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0] data_expanded.extend(_A) SCREAMING_SNAKE_CASE_ = np.asarray(_A) return data_expanded def lowerCAmelCase__ ( self , _A): # expanding matrix to one dimension list SCREAMING_SNAKE_CASE_ = np.asarray(_A) SCREAMING_SNAKE_CASE_ = np.shape(_A) SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 for i_map in range(_A): SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map)) for i in range(0 , _A , _A): for j in range(0 , _A , _A): SCREAMING_SNAKE_CASE_ = pd_pool[ i_pool ] SCREAMING_SNAKE_CASE_ = i_pool + 1 SCREAMING_SNAKE_CASE_ = np.multiply( _A , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(_A) return pd_all def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool): # model traning print('----------------------Start Training-------------------------') print((' - - Shape: Train_Data ', np.shape(_A))) print((' - - Shape: Teach_Data ', np.shape(_A))) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 10000 while rp < n_repeat and mse >= error_accuracy: SCREAMING_SNAKE_CASE_ = 0 
print(f"""-------------Learning Time {rp}--------------""") for p in range(len(_A)): # print('------------Learning Image: %d--------------'%p) SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p]) SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p]) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) SCREAMING_SNAKE_CASE_ = np.shape(_A) SCREAMING_SNAKE_CASE_ = self._expand(_A) SCREAMING_SNAKE_CASE_ = data_bp_input SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- SCREAMING_SNAKE_CASE_ = np.multiply( (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa))) SCREAMING_SNAKE_CASE_ = np.multiply( np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa))) SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji) SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga) SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist() SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool( _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv]) SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A) SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) SCREAMING_SNAKE_CASE_ = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE_ = 
self.thre_bpa - pd_k_all * self.rate_thre SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) SCREAMING_SNAKE_CASE_ = rp + 1 SCREAMING_SNAKE_CASE_ = error_count / patterns all_mse.append(_A) def draw_error(): SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(_A , '+-') plt.plot(_A , 'r--') plt.xlabel('Learning Times') plt.ylabel('All_mse') plt.grid(_A , alpha=0.5) plt.show() print('------------------Training Complished---------------------') print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""")) if draw_e: draw_error() return mse def lowerCAmelCase__ ( self , _A): # model predict SCREAMING_SNAKE_CASE_ = [] print('-------------------Start Testing-------------------------') print((' - - Shape: Test_Data ', np.shape(_A))) for p in range(len(_A)): SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p]) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) SCREAMING_SNAKE_CASE_ = self._expand(_A) SCREAMING_SNAKE_CASE_ = data_bp_input SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) produce_out.extend(bp_outa.getA().tolist()) SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out] return np.asarray(_A) def lowerCAmelCase__ ( self , _A): # return the data of image after convoluting process so we can check it out SCREAMING_SNAKE_CASE_ = np.asmatrix(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) 
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
620
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase__ : Optional[Any] = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] UpperCamelCase__ : List[str] = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] UpperCamelCase__ : int = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) UpperCamelCase__ : Any = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) UpperCamelCase__ : Optional[Any] = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" for tf_name, hf_name in patterns: SCREAMING_SNAKE_CASE_ = k.replace(_lowercase , _lowercase ) return k def 
_UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = BigBirdPegasusConfig(**_lowercase ) SCREAMING_SNAKE_CASE_ = BigBirdPegasusForConditionalGeneration(_lowercase ) SCREAMING_SNAKE_CASE_ = torch_model.state_dict() SCREAMING_SNAKE_CASE_ = {} # separating decoder weights SCREAMING_SNAKE_CASE_ = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} SCREAMING_SNAKE_CASE_ = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): SCREAMING_SNAKE_CASE_ = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE] if any(_lowercase ): continue SCREAMING_SNAKE_CASE_ = DECODER_PATTERNS SCREAMING_SNAKE_CASE_ = rename_state_dict_key(_lowercase , _lowercase ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): SCREAMING_SNAKE_CASE_ = v.T SCREAMING_SNAKE_CASE_ = torch.from_numpy(_lowercase ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): SCREAMING_SNAKE_CASE_ = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE] if any(_lowercase ): continue SCREAMING_SNAKE_CASE_ = REMAINING_PATTERNS SCREAMING_SNAKE_CASE_ = rename_state_dict_key(_lowercase , _lowercase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): SCREAMING_SNAKE_CASE_ = v.T SCREAMING_SNAKE_CASE_ = torch.from_numpy(_lowercase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" SCREAMING_SNAKE_CASE_ = mapping['model.embed_positions.weight'] SCREAMING_SNAKE_CASE_ = mapping.pop('model.embed_positions.weight' ) SCREAMING_SNAKE_CASE_ = torch_model.load_state_dict(_lowercase , strict=_lowercase ) SCREAMING_SNAKE_CASE_ = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tf.train.list_variables(_lowercase ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = ['global_step'] for name, shape in tqdm(_lowercase , desc='converting tf checkpoint to dict' ): SCREAMING_SNAKE_CASE_ = any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE_ = tf.train.load_variable(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE_ = array return tf_weights def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_tf_weights_as_numpy(_lowercase ) SCREAMING_SNAKE_CASE_ = convert_bigbird_pegasus(_lowercase , _lowercase ) torch_model.save_pretrained(_lowercase ) if __name__ == "__main__": UpperCamelCase__ : List[Any] = argparse.ArgumentParser() parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("--save_dir", default=None, type=str, help="Path to the 
output PyTorch model.") UpperCamelCase__ : int = parser.parse_args() UpperCamelCase__ : int = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
704
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int=7 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = None if token is not None: SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} # The id of a workflow (not of a workflow run) SCREAMING_SNAKE_CASE_ = '636036' SCREAMING_SNAKE_CASE_ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" SCREAMING_SNAKE_CASE_ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() return result["workflow_runs"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": SCREAMING_SNAKE_CASE_ = workflow_run['id'] break return workflow_run_id def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE ) if workflow_run_id is not None: SCREAMING_SNAKE_CASE_ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) for artifact_name in artifact_names: if artifact_name in artifacts_links: SCREAMING_SNAKE_CASE_ = artifacts_links[artifact_name] download_artifact( artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ): 
"""simple docstring""" get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = {} for artifact_name in artifact_names: SCREAMING_SNAKE_CASE_ = os.path.join(_SCREAMING_SNAKE_CASE , f"""{artifact_name}.zip""" ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = {} with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(_SCREAMING_SNAKE_CASE ): # read the file with z.open(_SCREAMING_SNAKE_CASE ) as f: SCREAMING_SNAKE_CASE_ = f.read().decode('UTF-8' ) return results
620
0
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list ): """simple docstring""" _enforce_args(_A , _A ) if n == 0: return 0 SCREAMING_SNAKE_CASE_ = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE_ = max( _A , prices[i - 1] + naive_cut_rod_recursive(n - i , _A ) ) return max_revue def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list ): """simple docstring""" _enforce_args(_A , _A ) SCREAMING_SNAKE_CASE_ = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(_A , _A , _A ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): """simple docstring""" if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: SCREAMING_SNAKE_CASE_ = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE_ = max( _A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _A , _A ) , ) SCREAMING_SNAKE_CASE_ = max_revenue return max_rev[n] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list ): """simple docstring""" _enforce_args(_A , _A ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. SCREAMING_SNAKE_CASE_ = [float('-inf' ) for _ in range(n + 1 )] SCREAMING_SNAKE_CASE_ = 0 for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE_ = max_rev[i] for j in range(1 , i + 1 ): SCREAMING_SNAKE_CASE_ = max(_A , prices[j - 1] + max_rev[i - j] ) SCREAMING_SNAKE_CASE_ = max_revenue_i return max_rev[n] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list ): """simple docstring""" if n < 0: SCREAMING_SNAKE_CASE_ = f"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(_A ) if n > len(_A ): SCREAMING_SNAKE_CASE_ = ( "Each integral piece of rod must have a corresponding price. 
" f"""Got n = {n} but length of prices = {len(_A )}""" ) raise ValueError(_A ) def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [6, 10, 12, 15, 20, 23] SCREAMING_SNAKE_CASE_ = len(_A ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. SCREAMING_SNAKE_CASE_ = 36 SCREAMING_SNAKE_CASE_ = top_down_cut_rod(_A , _A ) SCREAMING_SNAKE_CASE_ = bottom_up_cut_rod(_A , _A ) SCREAMING_SNAKE_CASE_ = naive_cut_rod_recursive(_A , _A ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
705
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Any = { "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"], "tokenization_mvp": ["MvpTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", "MvpForSequenceClassification", "MvpModel", "MvpPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """Memoization-based 0/1 knapsack using the global table ``f``.

    Args:
        i: number of items still considered (1-indexed into wt/val).
        wt: item weights.
        val: item values.
        j: remaining capacity.

    Returns:
        Best achievable value using the first ``i`` items within capacity ``j``.
    """
    global f  # a global dp table for knapsack; f[i][j] < 0 marks "uncomputed"
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i does not fit; inherit the best without it.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack.

    Args:
        w: knapsack capacity.
        wt: item weights.
        val: item values.
        n: number of items.

    Returns:
        (optimal value for capacity ``w``, full dp table).
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with w (not the leaked loop variable), so n == 0 or w == 0 no
    # longer raises NameError and correctly yields 0.
    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve 0/1 knapsack and also reconstruct one optimal item subset.

    Returns:
        (optimal value, set of 1-indexed item indices achieving it).

    Raises:
        ValueError: if wt/val are not sequences or differ in length.
        TypeError: if any weight is not an integer.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the dp table backwards, adding each item that was taken.

    If dp[i-1][j] == dp[i][j] item i was not needed; otherwise it was taken
    and we continue with the reduced capacity j - wt[i-1].
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
706
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """Exercise `accelerate launch` end to end.

    setUpClass stashes any user-level default config so the launcher runs
    against a clean environment; tearDownClass restores it afterwards.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Move the user's default config out of the way for the duration of the tests.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the stashed config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        # Every shipped example config must still launch successfully.
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """Check the gcloud command line assembled by `accelerate tpu-config --debug`."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    # Expected prefix injected before every remote command.
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
620
0
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = torch.nn.Linear(10 , 10) SCREAMING_SNAKE_CASE_ = torch.optim.SGD(model.parameters() , 0.1) SCREAMING_SNAKE_CASE_ = Accelerator() SCREAMING_SNAKE_CASE_ = accelerator.prepare(_lowercase) try: pickle.loads(pickle.dumps(_lowercase)) except Exception as e: self.fail(f"""Accelerated optimizer pickling failed with {e}""") AcceleratorState._reset_state()
707
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = inspect.getfile(accelerate.test_utils) SCREAMING_SNAKE_CASE_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py']) SCREAMING_SNAKE_CASE_ = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1]) @require_tpu def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = f"""\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n """.split() SCREAMING_SNAKE_CASE_ = [sys.executable] + distributed_args execute_subprocess_async(_A , env=os.environ.copy())
708
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one array slot in parallel odd-even transposition sort.

    Args:
        position: index of this process in the logical array.
        value: the element this process starts with.
        l_send / r_send: pipes used to send our value to the left/right neighbor
            (None at the array ends).
        lr_cv / rr_cv: pipes used to receive the left/right neighbor's value.
        result_pipe: pipe through which the final value is reported to main.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE: 10 phases match the 10-element list built in main(); a longer input
    # would need len(arr) phases to be guaranteed sorted.
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)

    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` ascending using one process per element; returns ``arr``."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
620
0
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'

_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'

_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the two answers are equal after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute (keep, delete, add) F-scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (prediction).
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """SARI score for one (source, candidate, references) triple, averaged over 1- to 4-grams."""
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Lowercase/tokenize a sentence the way sacrebleu expects.

    Supported tokenizers: "13a", "intl" (delegated to sacrebleu, with an API
    shim for sacrebleu >= 2), "moses", "penn" (sacremoses); anything else
    leaves the sentence unchanged.
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus SARI (0-100): mean sentence SARI over normalized inputs."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; requires the same number of references per prediction."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu wants references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=[
                'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
                'https://github.com/cocoxu/simplification/blob/master/SARI.py',
                'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
                'https://github.com/mjpost/sacreBLEU',
            ],
            reference_urls=[
                'https://www.aclweb.org/anthology/Q16-1029.pdf',
                'https://github.com/mjpost/sacreBLEU',
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return {'sari': ..., 'sacrebleu': ..., 'exact': ...} for the batch."""
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
709
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering') self.tool.setup() SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering' , remote=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop')
620
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline __lowerCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } __lowerCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} __lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowerCAmelCase : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __lowerCAmelCase : Any = frozenset([] ) __lowerCAmelCase : Optional[int] = True @property def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 4 SCREAMING_SNAKE_CASE_ = (16, 16) SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCamelCase_) return 
image def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( act_fn='gelu' , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( 'KDownBlock2D', 'KCrossAttnDownBlock2D', 'KCrossAttnDownBlock2D', 'KCrossAttnDownBlock2D', ) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , ) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) SCREAMING_SNAKE_CASE_ = EulerDiscreteScheduler(prediction_type='sample') SCREAMING_SNAKE_CASE_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , ) SCREAMING_SNAKE_CASE_ = CLIPTextModel(UpperCamelCase_) SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') SCREAMING_SNAKE_CASE_ = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCAmelCase__ ( self , _A , _A=0): if str(UpperCamelCase_).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(UpperCamelCase_) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) SCREAMING_SNAKE_CASE_ = { "prompt": 
"A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = "cpu" SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = self.pipeline_class(**UpperCamelCase_) pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(UpperCamelCase_) SCREAMING_SNAKE_CASE_ = pipe(**UpperCamelCase_).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3)) SCREAMING_SNAKE_CASE_ = np.array( [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5]) SCREAMING_SNAKE_CASE_ = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(UpperCamelCase_ , 1E-3) def lowerCAmelCase__ ( self): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3) def lowerCAmelCase__ ( self): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3) def lowerCAmelCase__ ( self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3) def lowerCAmelCase__ ( self): super().test_inference_batch_single_identical(expected_max_diff=7E-3) def lowerCAmelCase__ ( self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3) def lowerCAmelCase__ ( self): super().test_save_load_local(expected_max_difference=3E-3) def lowerCAmelCase__ ( self): super().test_save_load_optional_components(expected_max_difference=3E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = 
self.pipeline_class(**UpperCamelCase_) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_) pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(UpperCamelCase_) SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue SCREAMING_SNAKE_CASE_ = getattr(UpperCamelCase_ , scheduler_enum.name) SCREAMING_SNAKE_CASE_ = scheduler_cls.from_config(pipe.scheduler.config) SCREAMING_SNAKE_CASE_ = pipe(**UpperCamelCase_)[0] outputs.append(UpperCamelCase_) assert check_same_shape(UpperCamelCase_) @require_torch_gpu @slow class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = torch.manual_seed(33) SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa) pipe.to('cuda') SCREAMING_SNAKE_CASE_ = StableDiffusionLatentUpscalePipeline.from_pretrained( 'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa) upscaler.to('cuda') SCREAMING_SNAKE_CASE_ = "a photo of an astronaut high resolution, unreal engine, ultra realistic" SCREAMING_SNAKE_CASE_ = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type='latent').images SCREAMING_SNAKE_CASE_ = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type='np' , ).images[0] SCREAMING_SNAKE_CASE_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy') assert np.abs((expected_image - image).mean()) < 5E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = torch.manual_seed(33) SCREAMING_SNAKE_CASE_ 
= StableDiffusionLatentUpscalePipeline.from_pretrained( 'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa) upscaler.to('cuda') SCREAMING_SNAKE_CASE_ = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" SCREAMING_SNAKE_CASE_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png') SCREAMING_SNAKE_CASE_ = upscaler( prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type='np' , ).images[0] SCREAMING_SNAKE_CASE_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy') assert np.abs((expected_image - image).max()) < 5E-2
710
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : List[str] = 'levit' def __init__( self , _A=224 , _A=3 , _A=3 , _A=2 , _A=1 , _A=16 , _A=[128, 256, 384] , _A=[4, 8, 12] , _A=[4, 4, 4] , _A=[16, 16, 16] , _A=0 , _A=[2, 2, 2] , _A=[2, 2, 2] , _A=0.0_2 , **_A , ): super().__init__(**__A) SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = kernel_size SCREAMING_SNAKE_CASE_ = stride SCREAMING_SNAKE_CASE_ = padding SCREAMING_SNAKE_CASE_ = hidden_sizes SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = depths SCREAMING_SNAKE_CASE_ = key_dim SCREAMING_SNAKE_CASE_ = drop_path_rate SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = attention_ratio SCREAMING_SNAKE_CASE_ = mlp_ratio SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : List[str] = version.parse('1.11' ) @property def lowerCAmelCase__ ( self): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def lowerCAmelCase__ ( self): return 1E-4
711
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
620
0
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase_ ) class __snake_case ( UpperCamelCase_ ): def __init__( self , *_A , **_A): super().__init__(*_a , **_a) requires_backends(self , 'vision') self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING) def lowerCAmelCase__ ( self , _A=None , _A=None , _A=None): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = {} if prompt is not None: SCREAMING_SNAKE_CASE_ = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE_ = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE_ = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one') SCREAMING_SNAKE_CASE_ = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _A , **_A): return super().__call__(_a , **_a) def lowerCAmelCase__ ( self , _A , _A=None): SCREAMING_SNAKE_CASE_ = load_image(_a) if prompt is not None: if not isinstance(_a , _a): raise ValueError( f"""Received an invalid text input, got - {type(_a)} - but expected a single string. 
""" 'Note also that one single text can be provided for conditional image to text generation.') SCREAMING_SNAKE_CASE_ = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE_ = self.image_processor(images=_a , return_tensors=self.framework) SCREAMING_SNAKE_CASE_ = self.tokenizer(text=_a , add_special_tokens=_a).input_ids SCREAMING_SNAKE_CASE_ = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE_ = torch.tensor(_a).unsqueeze(0) model_inputs.update({'input_ids': input_ids}) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE_ = self.image_processor(images=_a , header_text=_a , return_tensors=self.framework) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE_ = self.image_processor(images=_a , return_tensors=self.framework) SCREAMING_SNAKE_CASE_ = self.tokenizer(_a , return_tensors=self.framework) model_inputs.update(_a) else: raise ValueError(f"""Model type {model_type} does not support conditional text generation""") else: SCREAMING_SNAKE_CASE_ = self.image_processor(images=_a , return_tensors=self.framework) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE_ = None return model_inputs def lowerCAmelCase__ ( self , _A , _A=None): if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _a) and all(x is None for x in model_inputs['input_ids']) ): SCREAMING_SNAKE_CASE_ = None if generate_kwargs is None: SCREAMING_SNAKE_CASE_ = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. 
SCREAMING_SNAKE_CASE_ = model_inputs.pop(self.model.main_input_name) SCREAMING_SNAKE_CASE_ = self.model.generate(_a , **_a , **_a) return model_outputs def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE_ = { """generated_text""": self.tokenizer.decode( _a , skip_special_tokens=_a , ) } records.append(_a) return records
712
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index == number_of_items: return 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 ) if weights[index] <= max_weight: SCREAMING_SNAKE_CASE_ = values[index] + knapsack( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
620
0
import random from .binary_exp_mod import bin_exp_mod def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=1_000 ): """simple docstring""" if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd SCREAMING_SNAKE_CASE_ = n - 1 SCREAMING_SNAKE_CASE_ = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) SCREAMING_SNAKE_CASE_ = 0 while count < prec: SCREAMING_SNAKE_CASE_ = random.randint(2 , n - 1 ) SCREAMING_SNAKE_CASE_ = bin_exp_mod(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if b != 1: SCREAMING_SNAKE_CASE_ = True for _ in range(__UpperCAmelCase ): if b == n - 1: SCREAMING_SNAKE_CASE_ = False break SCREAMING_SNAKE_CASE_ = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": UpperCamelCase__ : Tuple = abs(int(input("Enter bound : ").strip())) print("Here's the list of primes:") print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
713
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
0
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = VideoMAEConfig() set_architecture_configs(lowerCAmelCase__ , lowerCAmelCase__ ) if "finetuned" not in model_name: SCREAMING_SNAKE_CASE_ = False if "finetuned" in model_name: SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' if "kinetics" in model_name: SCREAMING_SNAKE_CASE_ = 400 SCREAMING_SNAKE_CASE_ = 'kinetics400-id2label.json' elif "ssv2" in model_name: SCREAMING_SNAKE_CASE_ = 174 SCREAMING_SNAKE_CASE_ = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} return config def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" if "small" in model_name: SCREAMING_SNAKE_CASE_ = 384 SCREAMING_SNAKE_CASE_ = 1_536 SCREAMING_SNAKE_CASE_ = 12 SCREAMING_SNAKE_CASE_ = 16 SCREAMING_SNAKE_CASE_ = 12 SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 192 SCREAMING_SNAKE_CASE_ = 768 elif "large" in model_name: SCREAMING_SNAKE_CASE_ = 1_024 SCREAMING_SNAKE_CASE_ = 4_096 SCREAMING_SNAKE_CASE_ = 24 SCREAMING_SNAKE_CASE_ = 16 SCREAMING_SNAKE_CASE_ = 12 SCREAMING_SNAKE_CASE_ = 8 SCREAMING_SNAKE_CASE_ = 512 SCREAMING_SNAKE_CASE_ = 2_048 elif "huge" in model_name: SCREAMING_SNAKE_CASE_ = 1_280 SCREAMING_SNAKE_CASE_ = 5_120 SCREAMING_SNAKE_CASE_ = 32 
SCREAMING_SNAKE_CASE_ = 16 SCREAMING_SNAKE_CASE_ = 12 SCREAMING_SNAKE_CASE_ = 8 SCREAMING_SNAKE_CASE_ = 640 SCREAMING_SNAKE_CASE_ = 2_560 elif "base" not in model_name: raise ValueError('Model name should include either \"small\", \"base\", \"large\", or \"huge\"' ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" if "encoder." in name: SCREAMING_SNAKE_CASE_ = name.replace('encoder.' , '' ) if "cls_token" in name: SCREAMING_SNAKE_CASE_ = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: SCREAMING_SNAKE_CASE_ = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: SCREAMING_SNAKE_CASE_ = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE_ = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE_ = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: SCREAMING_SNAKE_CASE_ = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: SCREAMING_SNAKE_CASE_ = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: SCREAMING_SNAKE_CASE_ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: SCREAMING_SNAKE_CASE_ = name.replace('attn' , 'attention.self' ) if "attn" in name: SCREAMING_SNAKE_CASE_ = name.replace('attn' , 'attention.attention' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE_ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE_ = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: SCREAMING_SNAKE_CASE_ = 
name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: SCREAMING_SNAKE_CASE_ = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: SCREAMING_SNAKE_CASE_ = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: SCREAMING_SNAKE_CASE_ = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: SCREAMING_SNAKE_CASE_ = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: SCREAMING_SNAKE_CASE_ = name.replace('head' , 'classifier' ) return name def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE_ = orig_state_dict.pop(lowerCAmelCase__ ) if key.startswith('encoder.' ): SCREAMING_SNAKE_CASE_ = key.replace('encoder.' , '' ) if "qkv" in key: SCREAMING_SNAKE_CASE_ = key.split('.' ) if key.startswith('decoder.blocks' ): SCREAMING_SNAKE_CASE_ = config.decoder_hidden_size SCREAMING_SNAKE_CASE_ = int(key_split[2] ) SCREAMING_SNAKE_CASE_ = 'decoder.decoder_layers.' if "weight" in key: SCREAMING_SNAKE_CASE_ = val[:dim, :] SCREAMING_SNAKE_CASE_ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE_ = val[-dim:, :] else: SCREAMING_SNAKE_CASE_ = config.hidden_size SCREAMING_SNAKE_CASE_ = int(key_split[1] ) SCREAMING_SNAKE_CASE_ = 'videomae.encoder.layer.' 
if "weight" in key: SCREAMING_SNAKE_CASE_ = val[:dim, :] SCREAMING_SNAKE_CASE_ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE_ = val[-dim:, :] else: SCREAMING_SNAKE_CASE_ = val return orig_state_dict def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) SCREAMING_SNAKE_CASE_ = np.load(lowerCAmelCase__ ) return list(lowerCAmelCase__ ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_videomae_config(lowerCAmelCase__ ) if "finetuned" in model_name: SCREAMING_SNAKE_CASE_ = VideoMAEForVideoClassification(lowerCAmelCase__ ) else: SCREAMING_SNAKE_CASE_ = VideoMAEForPreTraining(lowerCAmelCase__ ) # download original checkpoint, hosted on Google Drive SCREAMING_SNAKE_CASE_ = 'pytorch_model.bin' gdown.cached_download(lowerCAmelCase__ , lowerCAmelCase__ , quiet=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = torch.load(lowerCAmelCase__ , map_location='cpu' ) if "model" in files: SCREAMING_SNAKE_CASE_ = files['model'] else: SCREAMING_SNAKE_CASE_ = files['module'] SCREAMING_SNAKE_CASE_ = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) model.load_state_dict(lowerCAmelCase__ ) model.eval() # verify model on basic input SCREAMING_SNAKE_CASE_ = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) SCREAMING_SNAKE_CASE_ = prepare_video() SCREAMING_SNAKE_CASE_ = image_processor(lowerCAmelCase__ , return_tensors='pt' ) if "finetuned" not in model_name: SCREAMING_SNAKE_CASE_ = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) SCREAMING_SNAKE_CASE_ = torch.load(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = model(**lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = outputs.logits SCREAMING_SNAKE_CASE_ = [ 'videomae-small-finetuned-kinetics', 
'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": SCREAMING_SNAKE_CASE_ = torch.Size([1, 400] ) SCREAMING_SNAKE_CASE_ = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": SCREAMING_SNAKE_CASE_ = torch.Size([1, 174] ) SCREAMING_SNAKE_CASE_ = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": SCREAMING_SNAKE_CASE_ = torch.Size([1, 1_408, 1_536] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": SCREAMING_SNAKE_CASE_ = torch.Size([1, 1_408, 1_536] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one SCREAMING_SNAKE_CASE_ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": SCREAMING_SNAKE_CASE_ = torch.Size([1, 1_408, 1_536] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] ) elif model_name == "videomae-large-finetuned-kinetics": SCREAMING_SNAKE_CASE_ = torch.Size([1, 400] ) SCREAMING_SNAKE_CASE_ = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == 
"videomae-huge-finetuned-kinetics": SCREAMING_SNAKE_CASE_ = torch.Size([1, 400] ) SCREAMING_SNAKE_CASE_ = torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": SCREAMING_SNAKE_CASE_ = torch.Size([1, 400] ) SCREAMING_SNAKE_CASE_ = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": SCREAMING_SNAKE_CASE_ = torch.Size([1, 400] ) SCREAMING_SNAKE_CASE_ = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": SCREAMING_SNAKE_CASE_ = torch.Size([1, 1_408, 1_536] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": SCREAMING_SNAKE_CASE_ = torch.Size([1, 174] ) SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": SCREAMING_SNAKE_CASE_ = torch.Size([1, 1_408, 1_536] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": SCREAMING_SNAKE_CASE_ = torch.Size([1, 174] ) SCREAMING_SNAKE_CASE_ = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(f"""Model name not supported. Should be one of {model_names}""" ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": SCREAMING_SNAKE_CASE_ = outputs.loss assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) print('Loss ok!' 
) if pytorch_dump_folder_path is not None: print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(lowerCAmelCase__ , organization='nielsr' ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
714
def _UpperCAmelCase ( ): """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= n: SCREAMING_SNAKE_CASE_ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _UpperCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 500 ) if __name__ == "__main__": print(solution())
620
0
UpperCamelCase__ : str = [ (1_000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"), (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"), ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000} SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 while place < len(__A ): if (place + 1 < len(__A )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for arabic, roman in ROMAN: (SCREAMING_SNAKE_CASE_) = divmod(__A , __A ) result.append(roman * factor ) if number == 0: break return "".join(__A ) if __name__ == "__main__": import doctest doctest.testmod()
715
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
0
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Any = { "artists_file": "artists.json", "lyrics_file": "lyrics.json", "genres_file": "genres.json", } UpperCamelCase__ : int = { "artists_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json", }, "genres_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json", }, "lyrics_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json", }, } UpperCamelCase__ : Optional[Any] = { "jukebox": 512, } class __snake_case ( a__ ): __lowerCAmelCase : Dict = VOCAB_FILES_NAMES __lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES __lowerCAmelCase : int = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A , _A=["v3", "v2", "v2"] , _A=512 , _A=5 , _A="<|endoftext|>" , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token super().__init__( unk_token=_A , n_genres=_A , version=_A , max_n_lyric_tokens=_A , **_A , ) SCREAMING_SNAKE_CASE_ = version SCREAMING_SNAKE_CASE_ = max_n_lyric_tokens SCREAMING_SNAKE_CASE_ = n_genres with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' # In v2, we had a 
n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder) == 79: SCREAMING_SNAKE_CASE_ = oov.replace(r'\-\'' , r'\-+\'') SCREAMING_SNAKE_CASE_ = regex.compile(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.artists_encoder.items()} SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.genres_encoder.items()} SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.lyrics_encoder.items()} @property def lowerCAmelCase__ ( self): return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder) def lowerCAmelCase__ ( self): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = [self.artists_encoder.get(_A , 0) for artist in list_artists] for genres in range(len(_A)): SCREAMING_SNAKE_CASE_ = [self.genres_encoder.get(_A , 0) for genre in list_genres[genres]] SCREAMING_SNAKE_CASE_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres])) SCREAMING_SNAKE_CASE_ = [[self.lyrics_encoder.get(_A , 0) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def lowerCAmelCase__ ( self , _A): return list(_A) def lowerCAmelCase__ ( self , _A , _A , _A , **_A): SCREAMING_SNAKE_CASE_ = self.prepare_for_tokenization(_A , _A , _A) SCREAMING_SNAKE_CASE_ = self._tokenize(_A) return artist, genre, lyrics def lowerCAmelCase__ ( self , _A , _A , _A , _A = False): for idx in range(len(self.version)): if self.version[idx] == "v3": SCREAMING_SNAKE_CASE_ = artists[idx].lower() SCREAMING_SNAKE_CASE_ = [genres[idx].lower()] else: SCREAMING_SNAKE_CASE_ = self._normalize(artists[idx]) + '.v2' SCREAMING_SNAKE_CASE_ = [ self._normalize(_A) + '.v2' for genre in genres[idx].split('_') ] # split is for the full dictionary with combined genres if self.version[0] == "v2": SCREAMING_SNAKE_CASE_ = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+') SCREAMING_SNAKE_CASE_ = 
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n' SCREAMING_SNAKE_CASE_ = {vocab[index]: index + 1 for index in range(len(_A))} SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = len(_A) + 1 SCREAMING_SNAKE_CASE_ = self.vocab SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.vocab.items()} SCREAMING_SNAKE_CASE_ = '' else: SCREAMING_SNAKE_CASE_ = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+') SCREAMING_SNAKE_CASE_ = self._run_strip_accents(_A) SCREAMING_SNAKE_CASE_ = lyrics.replace('\\' , '\n') SCREAMING_SNAKE_CASE_ = self.out_of_vocab.sub('' , _A), [], [] return artists, genres, lyrics def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = unicodedata.normalize('NFD' , _A) SCREAMING_SNAKE_CASE_ = [] for char in text: SCREAMING_SNAKE_CASE_ = unicodedata.category(_A) if cat == "Mn": continue output.append(_A) return "".join(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ( [chr(_A) for i in range(ord('a') , ord('z') + 1)] + [chr(_A) for i in range(ord('A') , ord('Z') + 1)] + [chr(_A) for i in range(ord('0') , ord('9') + 1)] + ['.'] ) SCREAMING_SNAKE_CASE_ = frozenset(_A) SCREAMING_SNAKE_CASE_ = re.compile(r'_+') SCREAMING_SNAKE_CASE_ = ''.join([c if c in accepted else '_' for c in text.lower()]) SCREAMING_SNAKE_CASE_ = pattern.sub('_' , _A).strip('_') return text def lowerCAmelCase__ ( self , _A): return " ".join(_A) def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if not isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = TensorType(_A) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( 'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.') import tensorflow as tf SCREAMING_SNAKE_CASE_ = tf.constant SCREAMING_SNAKE_CASE_ = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is 
not installed.') import torch SCREAMING_SNAKE_CASE_ = torch.tensor SCREAMING_SNAKE_CASE_ = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.') import jax.numpy as jnp # noqa: F811 SCREAMING_SNAKE_CASE_ = jnp.array SCREAMING_SNAKE_CASE_ = _is_jax else: SCREAMING_SNAKE_CASE_ = np.asarray SCREAMING_SNAKE_CASE_ = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: SCREAMING_SNAKE_CASE_ = [inputs] if not is_tensor(_A): SCREAMING_SNAKE_CASE_ = as_tensor(_A) except: # noqa E722 raise ValueError( 'Unable to create tensor, you should probably activate truncation and/or padding ' 'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.') return inputs def __call__( self , _A , _A , _A="" , _A="pt"): SCREAMING_SNAKE_CASE_ = [0, 0, 0] SCREAMING_SNAKE_CASE_ = [artist] * len(self.version) SCREAMING_SNAKE_CASE_ = [genres] * len(self.version) SCREAMING_SNAKE_CASE_ = self.tokenize(_A , _A , _A) SCREAMING_SNAKE_CASE_ = self._convert_token_to_id(_A , _A , _A) SCREAMING_SNAKE_CASE_ = [-INFINITY] * len(full_tokens[-1]) SCREAMING_SNAKE_CASE_ = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_A) for i in range(len(self.version)) ] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks}) def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=_A)) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file']) with open(_A , 'w' , 
encoding='utf-8') as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=_A)) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_A)) return (artists_file, genres_file, lyrics_file) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = self.artists_decoder.get(_A) SCREAMING_SNAKE_CASE_ = [self.genres_decoder.get(_A) for genre in genres_index] SCREAMING_SNAKE_CASE_ = [self.lyrics_decoder.get(_A) for character in lyric_index] return artist, genres, lyrics
716
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if 
self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
620
0
from __future__ import annotations from typing import Any class __snake_case ( _A ): pass class __snake_case : def __init__( self , _A): SCREAMING_SNAKE_CASE_ = data SCREAMING_SNAKE_CASE_ = None def __iter__( self): SCREAMING_SNAKE_CASE_ = self SCREAMING_SNAKE_CASE_ = [] while node: if node in visited: raise ContainsLoopError visited.append(__lowerCamelCase) yield node.data SCREAMING_SNAKE_CASE_ = node.next_node @property def lowerCAmelCase__ ( self): try: list(self) return False except ContainsLoopError: return True if __name__ == "__main__": UpperCamelCase__ : Tuple = Node(1) UpperCamelCase__ : Optional[Any] = Node(2) UpperCamelCase__ : Dict = Node(3) UpperCamelCase__ : str = Node(4) print(root_node.has_loop) # False UpperCamelCase__ : Optional[int] = root_node.next_node print(root_node.has_loop) # True UpperCamelCase__ : Union[str, Any] = Node(5) UpperCamelCase__ : Dict = Node(6) UpperCamelCase__ : Optional[int] = Node(5) UpperCamelCase__ : Dict = Node(6) print(root_node.has_loop) # False UpperCamelCase__ : Any = Node(1) print(root_node.has_loop) # False
717
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : torch.FloatTensor class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ): super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE_ = Encoder( in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , ) SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A) SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) # pass init params to Decoder SCREAMING_SNAKE_CASE_ = Decoder( in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , ) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = self.encoder(_A) SCREAMING_SNAKE_CASE_ = self.quant_conv(_A) if not return_dict: return (h,) return VQEncoderOutput(latents=_A) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = False , _A = True): # also go through quantization layer if not force_not_quantize: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A) else: SCREAMING_SNAKE_CASE_ = h SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A) 
SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=_A) def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = sample SCREAMING_SNAKE_CASE_ = self.encode(_A).latents SCREAMING_SNAKE_CASE_ = self.decode(_A).sample if not return_dict: return (dec,) return DecoderOutput(sample=_A)
620
0
import copy import random from transformers import CLIPTokenizer class __snake_case ( _UpperCAmelCase ): def __init__( self , *_A , **_A): super().__init__(*lowerCamelCase_ , **lowerCamelCase_) SCREAMING_SNAKE_CASE_ = {} def lowerCAmelCase__ ( self , _A , *_A , **_A): SCREAMING_SNAKE_CASE_ = super().add_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_) if num_added_tokens == 0: raise ValueError( f"""The tokenizer already contains the token {placeholder_token}. Please pass a different""" ' `placeholder_token` that is not already in the tokenizer.') def lowerCAmelCase__ ( self , _A , *_A , _A=1 , **_A): SCREAMING_SNAKE_CASE_ = [] if num_vec_per_token == 1: self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_) output.append(lowerCamelCase_) else: SCREAMING_SNAKE_CASE_ = [] for i in range(lowerCamelCase_): SCREAMING_SNAKE_CASE_ = placeholder_token + f"""_{i}""" self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_) output.append(lowerCamelCase_) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f"""The tokenizer already has placeholder token {token} that can get confused with""" f""" {placeholder_token}keep placeholder tokens independent""") SCREAMING_SNAKE_CASE_ = output def lowerCAmelCase__ ( self , _A , _A=False , _A=1.0): if isinstance(lowerCamelCase_ , lowerCamelCase_): SCREAMING_SNAKE_CASE_ = [] for i in range(len(lowerCamelCase_)): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase_)) return output for placeholder_token in self.token_map: if placeholder_token in text: SCREAMING_SNAKE_CASE_ = self.token_map[placeholder_token] SCREAMING_SNAKE_CASE_ = tokens[: 1 + int(len(lowerCamelCase_) * prop_tokens_to_load)] if vector_shuffle: SCREAMING_SNAKE_CASE_ = copy.copy(lowerCamelCase_) random.shuffle(lowerCamelCase_) 
SCREAMING_SNAKE_CASE_ = text.replace(lowerCamelCase_ , ' '.join(lowerCamelCase_)) return text def __call__( self , _A , *_A , _A=False , _A=1.0 , **_A): return super().__call__( self.replace_placeholder_tokens_in_text( lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_) , *lowerCamelCase_ , **lowerCamelCase_ , ) def lowerCAmelCase__ ( self , _A , *_A , _A=False , _A=1.0 , **_A): return super().encode( self.replace_placeholder_tokens_in_text( lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_) , *lowerCamelCase_ , **lowerCamelCase_ , )
718
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCamelCase__ : Optional[int] = logging.getLogger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' 
) SCREAMING_SNAKE_CASE_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' ) SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' 
) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' 
) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) SCREAMING_SNAKE_CASE_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = special_dtypes SCREAMING_SNAKE_CASE_ = no_split_module_classes SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = max_memory SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. 
Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if modules_to_not_convert is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) SCREAMING_SNAKE_CASE_ = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" with init_empty_weights(): SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) 
else: SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE_ = False if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ): SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE_ = list(model.named_children() ) SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE_ = ['.weight', '.bias'] SCREAMING_SNAKE_CASE_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = param_name SCREAMING_SNAKE_CASE_ = model if "." 
in tensor_name: SCREAMING_SNAKE_CASE_ = tensor_name.split('.' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) SCREAMING_SNAKE_CASE_ = new_module SCREAMING_SNAKE_CASE_ = splits[-1] # offload weights SCREAMING_SNAKE_CASE_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
620
0
from __future__ import annotations from functools import lru_cache from math import ceil UpperCamelCase__ : str = 100 UpperCamelCase__ : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) UpperCamelCase__ : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 5_000 ): """simple docstring""" for number_to_partition in range(1 , _A ): if len(partition(_A ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F'{solution() = }')
719
# NOTE(review): BART tokenizer module (GPT-2 style byte-level BPE). The whole
# file has been collapsed onto five physical lines — indentation and statement
# boundaries are lost, so this text cannot execute as-is and only comments are
# added here. Visible contents: pretrained vocab/merges URL maps, a
# `bytes_to_unicode` byte<->unicode table builder, a `get_pairs` bigram
# helper, and a `PreTrainedTokenizer` subclass implementing the BPE merge
# loop, tokenize/convert/decode, vocabulary saving, and the
# special-token / token-type-id builders (<s> ... </s> </s> ... </s> pattern).
# Restore line structure from the upstream transformers source before any
# behavioral edit; it depends on `regex` and transformers internals, so it is
# not unit-testable in isolation.
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } UpperCamelCase__ : str = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ = bs[:] SCREAMING_SNAKE_CASE_ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ = char return pairs class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ = bytes_to_unicode() SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='utf-8') as merges_handle: SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1] SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A)))) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def lowerCAmelCase__ ( self): return len(self.encoder) def lowerCAmelCase__ ( self): return dict(self.encoder , **self.added_tokens_encoder) def lowerCAmelCase__ ( self , _A): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = get_pairs(_A) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf'))) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 while i < len(_A): try: SCREAMING_SNAKE_CASE_ = word.index(_A , _A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) SCREAMING_SNAKE_CASE_ = j if word[i] == first and i < len(_A) - 1 and word[i + 1] == second: 
new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = new_word if len(_A) == 1: break else: SCREAMING_SNAKE_CASE_ = get_pairs(_A) SCREAMING_SNAKE_CASE_ = ' '.join(_A) SCREAMING_SNAKE_CASE_ = word return word def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for token in re.findall(self.pat , _A): SCREAMING_SNAKE_CASE_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' ')) return bpe_tokens def lowerCAmelCase__ ( self , _A): return self.encoder.get(_A , self.encoder.get(self.unk_token)) def lowerCAmelCase__ ( self , _A): return self.decoder.get(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n') SCREAMING_SNAKE_CASE_ = 0 with open(_A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!') SCREAMING_SNAKE_CASE_ = token_index writer.write(' '.join(_A) + 
'\n') index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCAmelCase__ ( self , _A , _A=False , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ = ' ' + text return (text, kwargs)
620
0
from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True when *number* is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple:
    """Return x_num/x_den + y_num/y_den + z_num/z_den reduced to lowest terms.

    The sum is returned as a (numerator, denominator) tuple so it can be
    stored in a set for de-duplication.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum s = x + y + z over all unique triples of proper fractions
    x, y, z < 1 with denominators <= *order* that satisfy x^n + y^n = z^n for
    some n in {1, 2, -1, -2}; return numerator + denominator of the total.
    """
    unique_s: set = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n = 1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(
                            add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        )

                    # n = 2: z^2 = x^2 + y^2 (only when both sides are squares)
                    z_num = (
                        x_num * x_num * y_den * y_den
                        + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(
                                add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            )

                    # n = -1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(
                            add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        )

                    # n = -2: 1/z^2 = 1/x^2 + 1/y^2 (only when both sides are squares)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num
                        + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(
                                add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            )

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


# Backward-compat alias for the obfuscated public name (last binding wins in
# the original).
_UpperCAmelCase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
720
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'dpr' def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ): super().__init__(pad_token_id=_A , **_A) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = position_embedding_type
620
0
from __future__ import annotations def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list[float] ): SCREAMING_SNAKE_CASE_ = 0.00 SCREAMING_SNAKE_CASE_ = 0 for resistor in resistors: if resistor <= 0: SCREAMING_SNAKE_CASE_ = f"""Resistor at index {index} has a negative or zero value!""" raise ValueError(__UpperCamelCase ) first_sum += 1 / float(__UpperCamelCase ) index += 1 return 1 / first_sum def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list[float] ): SCREAMING_SNAKE_CASE_ = 0.00 SCREAMING_SNAKE_CASE_ = 0 for resistor in resistors: sum_r += resistor if resistor < 0: SCREAMING_SNAKE_CASE_ = f"""Resistor at index {index} has a negative value!""" raise ValueError(__UpperCamelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
721
# NOTE(review): pytest conftest for the `datasets` test-suite. Registers
# fixture plugin modules, adds the `unit` marker to tests that carry neither
# `integration` nor `unit`, declares the `torchaudio_latest` marker, and uses
# autouse fixtures to redirect all HF cache / download / module paths into a
# per-session temporary directory, disable progress bars, and turn off
# download-count reporting plus a sqlalchemy deprecation warning. Collapsed
# onto two physical lines (a `def` signature is split across the boundary),
# so only comments are added; it depends on pytest and `datasets` and is not
# unit-testable in isolation.
import pytest import datasets # Import fixture modules as plugins UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' ) def _UpperCAmelCase ( ): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _UpperCAmelCase ( 
_SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
620
0
# NOTE(review): transformers `add-new-model` CLI command (deprecated,
# cookiecutter-based model scaffolding). The file has been collapsed onto five
# physical lines — indentation and block structure are lost, so only comments
# are added here. Visible logic: register the argparse sub-command; on run,
# warn about deprecation, require cookiecutter, refuse to run if stale
# `cookiecutter-template-*` directories exist, execute the template, read the
# generated configuration.json, create the model package plus test package,
# and shuttle the generated modeling/tokenization/test/doc files into the repo
# tree, splicing marked snippets into existing files via the
# `# To replace in:` / `# Below:` / `# End.` protocol. Depends on
# cookiecutter and the transformers repo layout; not unit-testable in
# isolation. Re-split from upstream before any behavioral edit.
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter UpperCamelCase__ : int = True except ImportError: UpperCamelCase__ : Optional[Any] = False UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Namespace ): """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class __snake_case ( _lowercase ): @staticmethod def lowerCAmelCase__ ( _A): SCREAMING_SNAKE_CASE_ = parser.add_parser('add-new-model') add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.') add_new_model_parser.add_argument('--testing_file' , type=A_ , help='Configuration file on which to run.') add_new_model_parser.add_argument( '--path' , type=A_ , help='Path to cookiecutter. Should only be used for testing purposes.') add_new_model_parser.set_defaults(func=A_) def __init__( self , _A , _A , _A=None , *_A): SCREAMING_SNAKE_CASE_ = testing SCREAMING_SNAKE_CASE_ = testing_file SCREAMING_SNAKE_CASE_ = path def lowerCAmelCase__ ( self): warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.') if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. 
Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n') # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory SCREAMING_SNAKE_CASE_ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(A_) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.') SCREAMING_SNAKE_CASE_ = ( Path(A_).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent ) SCREAMING_SNAKE_CASE_ = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(A_)) else: with open(self._testing_file , 'r') as configuration_file: SCREAMING_SNAKE_CASE_ = json.load(A_) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path) , no_input=A_ , extra_context=A_ , ) SCREAMING_SNAKE_CASE_ = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' , 'r') as configuration_file: SCREAMING_SNAKE_CASE_ = json.load(A_) SCREAMING_SNAKE_CASE_ = configuration['lowercase_modelname'] SCREAMING_SNAKE_CASE_ = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(f"""{directory}/configuration.json""") SCREAMING_SNAKE_CASE_ = 'PyTorch' in generate_tensorflow_pytorch_and_flax SCREAMING_SNAKE_CASE_ = 'TensorFlow' in generate_tensorflow_pytorch_and_flax SCREAMING_SNAKE_CASE_ = 'Flax' in generate_tensorflow_pytorch_and_flax SCREAMING_SNAKE_CASE_ = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(A_ , exist_ok=A_) os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=A_) # Tests 
require submodules as they have parent imports with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w'): pass shutil.move( f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , ) shutil.move( f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(_A): with open(A_ , 'r') as f: SCREAMING_SNAKE_CASE_ = f.readlines() with open(A_ , 'w') as f: for line in lines: if "# Copied from transformers." not in line: f.write(A_) if output_pytorch: if not self._testing: remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""") shutil.move( f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""") os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""") if output_tensorflow: if not self._testing: remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""") shutil.move( f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""") os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""") if output_flax: if not self._testing: remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""") shutil.move( f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( 
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""") os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""") shutil.move( f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(_A , _A , _A): # Create temp file SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mkstemp() SCREAMING_SNAKE_CASE_ = False with fdopen(A_ , 'w') as new_file: with open(A_) as old_file: for line in old_file: new_file.write(A_) if line_to_copy_below in line: SCREAMING_SNAKE_CASE_ = True for line_to_copy in lines_to_copy: new_file.write(A_) if not line_found: raise ValueError(f"""Line {line_to_copy_below} was not found in file.""") # Copy the file permissions from the old file to the new file copymode(A_ , A_) # Remove original file remove(A_) # Move new file move(A_ , A_) def skip_units(_A): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(_A): with open(A_) as datafile: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False for line in datafile: if "# To replace in: " in line and "##" not in line: SCREAMING_SNAKE_CASE_ = line.split('"')[1] SCREAMING_SNAKE_CASE_ = skip_units(A_) elif "# Below: " in line and "##" not in line: 
SCREAMING_SNAKE_CASE_ = line.split('"')[1] SCREAMING_SNAKE_CASE_ = skip_units(A_) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ , A_ , A_) SCREAMING_SNAKE_CASE_ = [] elif "# Replace with" in line and "##" not in line: SCREAMING_SNAKE_CASE_ = [] elif "##" not in line: lines_to_copy.append(A_) remove(A_) replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""") os.rmdir(A_)
700
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
0
# Sudoku solver using simple backtracking.
Matrix = list  # alias for readability; grids are 9x9 lists of lists of int

# assigning initial values to the grid
initial_grid = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid, row: int, column: int, n: int) -> bool:
    """Return True when digit *n* may legally be placed at (row, column):
    it must not already appear in that row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid):
    """Return the (row, column) of the first empty (zero) cell, or None
    when the grid is completely filled."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid):
    """Fill *grid* in place by backtracking.

    Returns the solved grid, or None when no assignment of 1..9 to the empty
    cells satisfies the row/column/box constraints.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0
    return None


def print_solution(grid) -> None:
    """Print the grid, one row per line, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


# Backward-compat alias for the obfuscated public name (last binding wins in
# the original).
_UpperCAmelCase = print_solution

if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
701
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Any = 'biogpt' def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ): SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = scale_embedding SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = layerdrop SCREAMING_SNAKE_CASE_ = activation_dropout super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
620
0
from collections.abc import Callable import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Callable , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ): """simple docstring""" SCREAMING_SNAKE_CASE_ = int(np.ceil((x_end - xa) / step_size ) ) SCREAMING_SNAKE_CASE_ = np.zeros((n + 1,) ) SCREAMING_SNAKE_CASE_ = ya SCREAMING_SNAKE_CASE_ = xa for k in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = y[k] + step_size * ode_func(_SCREAMING_SNAKE_CASE , y[k] ) SCREAMING_SNAKE_CASE_ = y[k] + ( (step_size / 2) * (ode_func(_SCREAMING_SNAKE_CASE , y[k] ) + ode_func(x + step_size , _SCREAMING_SNAKE_CASE )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
702
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Class-conditional image generation with a Diffusion Transformer (DiT).

    Components: a transformer denoiser, a VAE decoder, and a scheduler.
    ``id2label`` (ImageNet id -> comma-separated label names) is inverted into
    ``self.labels`` so users can look up class ids by human-readable name.
    """

    def __init__(self, transformer, vae, scheduler, id2label=None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label):
        """Map a label name (or list of names) to ImageNet class ids.

        Raises ValueError for any name not present in ``self.labels``.
        """
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels,
        guidance_scale=4.0,
        generator=None,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
    ):
        """Run the denoising loop and decode latents into images.

        Classifier-free guidance is applied when ``guidance_scale > 1``:
        the batch is duplicated, the second half conditioned on the null
        class id 1000, and the two noise predictions are blended.
        """
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        # 1000 is the "null" class used for the unconditional branch.
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Keep the two halves identical; conditioning differs only
                # through class_labels_input.
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma: drop the variance channels before the scheduler step
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
620
0
from collections.abc import Generator def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 0, 1 while True: SCREAMING_SNAKE_CASE_ = b, a + b yield b def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 1_000 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = fibonacci_generator() while len(str(next(snake_case_ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
703
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    """A tiny from-scratch convolutional network: one conv layer, one
    average/max pooling layer, and a two-layer fully-connected head,
    trained with plain gradient descent. Uses ``np.mat`` matrices
    throughout (so ``.getA()`` converts back to arrays).
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, number_of_kernels, conv_step]
        :param size_p1: pooling window size
        :param bp_num1: units of the flattened input layer
        :param bp_num2: units of the hidden layer
        :param bp_num3: units of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds (biases)
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Weights initialised uniformly in (-0.5, 0.5); thresholds in (-1, 1).
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_weight = model_dic.get("rate_weight")
        rate_thre = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_weight, rate_thre)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Logistic sigmoid."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve ``data`` with each kernel in ``w_convs`` (stride
        ``conv_step``), squashing through the sigmoid.

        Returns (flattened receptive-field matrix, list of feature maps).
        """
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to One dimenssion
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Down-sample each feature map by ``size_pooling`` (average or max)."""
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample the pooled gradient back to feature-map size and apply
        the sigmoid derivative ``out * (1 - out)``.
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    # every pooled cell's gradient is shared by its window
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Gradient-descent training until ``n_repeat`` epochs or the mean
        squared error drops below ``error_accuracy``. Returns the final mse.
        """
        # model traning
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Forward pass over ``datas_test``; outputs rounded to 3 decimals."""
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
620
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> public names, registered only when the optional
# dependency backing each submodule is installed.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return up to ``num_runs`` latest scheduled daily-CI workflow runs
    on ``main`` (GitHub Actions API). ``token`` may be None for
    unauthenticated requests.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the run id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run
    into ``output_dir`` (as ``<name>.zip`` files)."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: "worflow_run_id" (sic) is the keyword the callee declares.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unpack the named artifacts; return
    ``{artifact_name: {filename: decoded text}}`` for every zip found."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
620
0
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable UpperCamelCase__ : Any = { "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"], "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXJapaneseForCausalLM", "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", "GPTNeoXJapanesePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys UpperCamelCase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Any = { "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"], "tokenization_mvp": ["MvpTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", "MvpForSequenceClassification", "MvpModel", "MvpPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
from abc import ABC, abstractmethod from typing import List, Optional class __snake_case ( __a ): def __init__( self): self.test() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = False while not completed: if counter == 1: self.reset() SCREAMING_SNAKE_CASE_ = self.advance() if not self.does_advance(a_): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.') SCREAMING_SNAKE_CASE_ = self.update(a_) counter += 1 if counter > 10000: raise Exception('update() does not fulfill the constraint.') if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.') @abstractmethod def lowerCAmelCase__ ( self): raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""") @abstractmethod def lowerCAmelCase__ ( self , _A): raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""") @abstractmethod def lowerCAmelCase__ ( self , _A): raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""") @abstractmethod def lowerCAmelCase__ ( self): raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""") @abstractmethod def lowerCAmelCase__ ( self): raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""") @abstractmethod def lowerCAmelCase__ ( self , _A=False): raise NotImplementedError( f"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""") class __snake_case ( __a ): def __init__( self , _A): super(a_ , self).__init__() if not isinstance(a_ , a_) or len(a_) == 0: raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""") if any((not isinstance(a_ , a_) or token_id < 0) for token_id in token_ids): raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""") SCREAMING_SNAKE_CASE_ = token_ids SCREAMING_SNAKE_CASE_ = len(self.token_ids) SCREAMING_SNAKE_CASE_ = -1 # the index of the currently fulfilled step SCREAMING_SNAKE_CASE_ = False def lowerCAmelCase__ ( self): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCAmelCase__ ( self , _A): if not isinstance(a_ , a_): raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(a_)}""") if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCAmelCase__ ( self , _A): if not isinstance(a_ , a_): raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(a_)}""") SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False if self.does_advance(a_): self.fulfilled_idx += 1 SCREAMING_SNAKE_CASE_ = True if self.fulfilled_idx == (self.seqlen - 1): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = completed else: # failed to make progress. 
SCREAMING_SNAKE_CASE_ = True self.reset() return stepped, completed, reset def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = 0 def lowerCAmelCase__ ( self): return self.seqlen - (self.fulfilled_idx + 1) def lowerCAmelCase__ ( self , _A=False): SCREAMING_SNAKE_CASE_ = PhrasalConstraint(self.token_ids) if stateful: SCREAMING_SNAKE_CASE_ = self.seqlen SCREAMING_SNAKE_CASE_ = self.fulfilled_idx SCREAMING_SNAKE_CASE_ = self.completed return new_constraint class __snake_case : def __init__( self , _A , _A=True): SCREAMING_SNAKE_CASE_ = max([len(a_) for one in nested_token_ids]) SCREAMING_SNAKE_CASE_ = {} for token_ids in nested_token_ids: SCREAMING_SNAKE_CASE_ = root for tidx, token_id in enumerate(a_): if token_id not in level: SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = level[token_id] if no_subsets and self.has_subsets(a_ , a_): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' f""" {nested_token_ids}.""") SCREAMING_SNAKE_CASE_ = root def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.trie for current_token in current_seq: SCREAMING_SNAKE_CASE_ = start[current_token] SCREAMING_SNAKE_CASE_ = list(start.keys()) return next_tokens def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.next_tokens(a_) return len(a_) == 0 def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = list(root.values()) if len(a_) == 0: return 1 else: return sum([self.count_leaves(a_) for nn in next_nodes]) def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = self.count_leaves(a_) return len(a_) != leaf_count class __snake_case ( __a ): def __init__( self , _A): super(a_ , self).__init__() if not isinstance(a_ , a_) or len(a_) == 0: raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""") if any(not isinstance(a_ , a_) for token_ids in nested_token_ids): raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is 
{nested_token_ids}.""") if any( any((not isinstance(a_ , a_) or token_id < 0) for token_id in token_ids) for token_ids in nested_token_ids): raise ValueError( f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""") SCREAMING_SNAKE_CASE_ = DisjunctiveTrie(a_) SCREAMING_SNAKE_CASE_ = nested_token_ids SCREAMING_SNAKE_CASE_ = self.trie.max_height SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.trie.next_tokens(self.current_seq) if len(a_) == 0: return None else: return token_list def lowerCAmelCase__ ( self , _A): if not isinstance(a_ , a_): raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_)}""") SCREAMING_SNAKE_CASE_ = self.trie.next_tokens(self.current_seq) return token_id in next_tokens def lowerCAmelCase__ ( self , _A): if not isinstance(a_ , a_): raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_)}""") SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False if self.does_advance(a_): self.current_seq.append(a_) SCREAMING_SNAKE_CASE_ = True else: SCREAMING_SNAKE_CASE_ = True self.reset() SCREAMING_SNAKE_CASE_ = self.trie.reached_leaf(self.current_seq) SCREAMING_SNAKE_CASE_ = completed return stepped, completed, reset def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = [] def lowerCAmelCase__ ( self): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq) def lowerCAmelCase__ ( self , _A=False): SCREAMING_SNAKE_CASE_ = DisjunctiveConstraint(self.token_ids) if stateful: SCREAMING_SNAKE_CASE_ = self.seqlen SCREAMING_SNAKE_CASE_ = self.current_seq SCREAMING_SNAKE_CASE_ = self.completed return new_constraint class __snake_case : def __init__( self , _A): SCREAMING_SNAKE_CASE_ = constraints # max # of steps required to 
fulfill a given constraint SCREAMING_SNAKE_CASE_ = max([c.seqlen for c in constraints]) SCREAMING_SNAKE_CASE_ = len(a_) SCREAMING_SNAKE_CASE_ = False self.init_state() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = [constraint.copy(stateful=a_) for constraint in self.constraints] def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints) * self.max_seqlen) + add def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" SCREAMING_SNAKE_CASE_ = constraint.advance() if isinstance(a_ , a_): token_list.append(a_) elif isinstance(a_ , a_): token_list.extend(a_) else: SCREAMING_SNAKE_CASE_ = self.inprogress_constraint.advance() if isinstance(a_ , a_): token_list.append(a_) elif isinstance(a_ , a_): token_list.extend(a_) if len(a_) == 0: return None else: return token_list def lowerCAmelCase__ ( self , _A): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint SCREAMING_SNAKE_CASE_ = self.add(a_) # the entire list of constraints are fulfilled if self.completed: break def lowerCAmelCase__ ( self , _A): if not isinstance(a_ , a_): raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""") SCREAMING_SNAKE_CASE_ = False, False if self.completed: SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state SCREAMING_SNAKE_CASE_ = self.inprogress_constraint.update(a_) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. 
constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a_)) SCREAMING_SNAKE_CASE_ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint) SCREAMING_SNAKE_CASE_ = None if len(self.pending_constraints) == 0: # we're done! SCREAMING_SNAKE_CASE_ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints): if pending_constraint.does_advance(a_): SCREAMING_SNAKE_CASE_ = pending_constraint.update(a_) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.') if complete: self.complete_constraints.append(a_) SCREAMING_SNAKE_CASE_ = None if not complete and stepped: SCREAMING_SNAKE_CASE_ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". SCREAMING_SNAKE_CASE_ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. SCREAMING_SNAKE_CASE_ = True break # prevent accidentally stepping through multiple constraints with just one token. 
return complete, stepped def lowerCAmelCase__ ( self , _A=True): SCREAMING_SNAKE_CASE_ = ConstraintListState(self.constraints) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: SCREAMING_SNAKE_CASE_ = [ constraint.copy(stateful=a_) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: SCREAMING_SNAKE_CASE_ = self.inprogress_constraint.copy(stateful=a_) SCREAMING_SNAKE_CASE_ = [constraint.copy() for constraint in self.pending_constraints] return new_state
706
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils ) __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] ) __lowerCAmelCase : Tuple = ['accelerate', 'launch'] __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate' __lowerCAmelCase : List[str] = 'default_config.yaml' __lowerCAmelCase : List[Any] = config_folder / config_file __lowerCAmelCase : str = config_folder / '_default_config.yaml' __lowerCAmelCase : Optional[int] = Path('tests/test_configs' ) @classmethod def lowerCAmelCase__ ( cls): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def lowerCAmelCase__ ( cls): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy()) def lowerCAmelCase__ ( self): for config in sorted(self.test_config_path.glob('**/*.yaml')): with self.subTest(config_file=_A): execute_subprocess_async( self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy()) def lowerCAmelCase__ ( self): execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy()) class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Optional[Any] = 'test-tpu' __lowerCAmelCase : str = 'us-central1-a' __lowerCAmelCase : Union[str, Any] = 'ls' __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config'] __lowerCAmelCase : Union[str, Any] = 'cd /usr/share' __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh' __lowerCAmelCase : 
Dict = 'Running gcloud compute tpus tpu-vm ssh' def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo "Hello World"', '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} 
test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
620
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = 'ClapFeatureExtractor' __lowerCAmelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self , _A , _A): super().__init__(__lowerCAmelCase , __lowerCAmelCase) def __call__( self , _A=None , _A=None , _A=None , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('sampling_rate' , __lowerCAmelCase) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.') if text is not None: SCREAMING_SNAKE_CASE_ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase) if audios is not None: SCREAMING_SNAKE_CASE_ = self.feature_extractor( __lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase) if text is not None and audios is not None: SCREAMING_SNAKE_CASE_ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase) , tensor_type=__lowerCAmelCase) def lowerCAmelCase__ ( self , *_A , **_A): return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase) def lowerCAmelCase__ ( self , *_A , **_A): return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase) @property def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
707
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : str = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[Any] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time UpperCamelCase__ : int = Lock() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left SCREAMING_SNAKE_CASE_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right SCREAMING_SNAKE_CASE_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so 
they are made outside # of the loop SCREAMING_SNAKE_CASE_ = Pipe() SCREAMING_SNAKE_CASE_ = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) SCREAMING_SNAKE_CASE_ = temp_rs SCREAMING_SNAKE_CASE_ = temp_rr for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): SCREAMING_SNAKE_CASE_ = Pipe() SCREAMING_SNAKE_CASE_ = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) SCREAMING_SNAKE_CASE_ = temp_rs SCREAMING_SNAKE_CASE_ = temp_rr process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=( len(_SCREAMING_SNAKE_CASE ) - 1, arr[len(_SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ): SCREAMING_SNAKE_CASE_ = result_pipe[p][0].recv() process_array_[p].join() return arr def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = odd_even_transposition(_SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
620
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __snake_case : def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = self.vocab_size - 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ = None 
SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = OpenAIGPTModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__) SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__) SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = OpenAIGPTLMHeadModel(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__) 
self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = OpenAIGPTForSequenceClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE_ ) = config_and_inputs SCREAMING_SNAKE_CASE_ = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class __snake_case ( a__ , a__ , a__ , unittest.TestCase ): __lowerCAmelCase : List[Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __lowerCAmelCase : Dict = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __lowerCAmelCase : Any = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def lowerCAmelCase__ ( self , _A , _A , _A=False): SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": SCREAMING_SNAKE_CASE_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ = inputs_dict["labels"] SCREAMING_SNAKE_CASE_ = inputs_dict["labels"] SCREAMING_SNAKE_CASE_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__) return inputs_dict def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = OpenAIGPTModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase__) @slow def lowerCAmelCase__ ( self): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = OpenAIGPTModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) @require_torch class 
__snake_case ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt') model.to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowerCAmelCase__) # the president is SCREAMING_SNAKE_CASE_ = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the SCREAMING_SNAKE_CASE_ = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase__)
709
"""Tests for the ``text-question-answering`` tool (structure restored).

NOTE(review): this chunk was stored with every newline collapsed onto one
physical line, which is not valid Python; the line structure below is
restored.  Three further defects are fixed:
  * the test class inherited from the undefined name ``lowerCAmelCase__`` --
    the imported (and otherwise unused) ``ToolTesterMixin`` is the intended
    base class;
  * every method was named ``lowerCAmelCase__``, so each later ``def``
    silently shadowed the previous one and none were discoverable as tests;
    they now carry distinct ``setUp``/``test_*`` names matching their bodies;
  * ``setUp`` assigned the tools to a throwaway local and then called
    ``self.tool.setup()`` on a never-set attribute, and the other methods
    passed the undefined placeholder ``_A`` where the module-level fixture
    text (and ``remote=True``) were plainly intended.
"""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Fixture passage that every question below is answered against.
UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class __snake_case(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # Local and remote instances of the same tool under test.
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        # Positional (text, question) call on the local tool.
        result = self.tool(UpperCamelCase__, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        # Same positional call, routed through the remote endpoint.
        result = self.remote_tool(UpperCamelCase__, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        # Keyword-argument form on the local tool.
        result = self.tool(text=UpperCamelCase__, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        # Keyword-argument form on the remote tool.
        result = self.remote_tool(text=UpperCamelCase__, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
620
0
# NOTE(review): the newlines of this chunk were collapsed during extraction,
# so the three physical lines below are NOT valid Python as stored.  From the
# visible tokens this is a diffusers-style test module for
# DanceDiffusionPipeline: a fast test class (dummy 1-D UNet + IPNDMScheduler
# components, CPU determinism checks against hard-coded audio slices) followed
# by an @slow @require_torch_gpu integration class loading
# 'harmonai/maestro-150k' in fp32 and fp16.  Identifiers were machine
# obfuscated (SCREAMING_SNAKE_CASE_, lowerCAmelCase__, __lowerCAmelCase), so
# several names no longer resolve even once line breaks are restored -- TODO:
# recover the original file before running or refactoring; kept byte-identical
# here to avoid corrupting it further.
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __snake_case ( UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : str = DanceDiffusionPipeline __lowerCAmelCase : Dict = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __lowerCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - { 'callback', 'latents', 'callback_steps', 'output_type', 'num_images_per_prompt', } __lowerCAmelCase : str = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __lowerCAmelCase : int = False __lowerCAmelCase : List[str] = False def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCAmelCase , use_timestep_embedding=__lowerCAmelCase , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) SCREAMING_SNAKE_CASE_ = IPNDMScheduler() SCREAMING_SNAKE_CASE_ = { 'unet': unet, 'scheduler': scheduler, } return components def lowerCAmelCase__ ( self , _A , _A=0): if str(__lowerCAmelCase).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(__lowerCAmelCase) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def lowerCAmelCase__ ( self): 
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = DanceDiffusionPipeline(**__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = pipe(**__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = output.audios SCREAMING_SNAKE_CASE_ = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) SCREAMING_SNAKE_CASE_ = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2 @skip_mps def lowerCAmelCase__ ( self): return super().test_save_load_local() @skip_mps def lowerCAmelCase__ ( self): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3) @skip_mps def lowerCAmelCase__ ( self): return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase__ ( self): return super().test_attention_slicing_forward_pass() def lowerCAmelCase__ ( self): super().test_inference_batch_single_identical(expected_max_diff=3E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = torch_device SCREAMING_SNAKE_CASE_ = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k') SCREAMING_SNAKE_CASE_ = pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6) SCREAMING_SNAKE_CASE_ = output.audios SCREAMING_SNAKE_CASE_ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) SCREAMING_SNAKE_CASE_ = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, 
-0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = torch_device SCREAMING_SNAKE_CASE_ = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa) SCREAMING_SNAKE_CASE_ = pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) SCREAMING_SNAKE_CASE_ = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6) SCREAMING_SNAKE_CASE_ = output.audios SCREAMING_SNAKE_CASE_ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) SCREAMING_SNAKE_CASE_ = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
710
# NOTE(review): newlines were collapsed during extraction, so the physical
# lines below are NOT valid Python as stored (one statement is even split
# across the L384/L385 boundary).  From the visible tokens this is a
# transformers-style test module for BeitImageProcessor: a tester helper class
# holding resize/crop/normalize settings, two module-level fixture loaders for
# the 'hf-internal-testing/fixtures_ade20k' dataset, and an
# ImageProcessingSavingTestMixin-based test class covering PIL / numpy / torch
# inputs and semantic-segmentation label encoding (labels clamped to 0..255,
# reduce_labels to 0..150).  Identifiers are machine obfuscated, so many names
# no longer resolve even with line breaks restored -- TODO: recover the
# original file before running; kept byte-identical here.
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
0
# NOTE(review): newlines were collapsed during extraction, so the physical
# lines below are NOT valid Python as stored (several statements run across
# the physical-line boundaries).  From the visible tokens this is a
# transformers-style test module for RagRetriever: it builds tiny DPR and BART
# tokenizers in a tempdir, constructs dummy FAISS-indexed datasets (canonical
# HF index, custom HF index from memory and from disk, and a legacy index),
# and exercises retrieve()/save_pretrained()/from_pretrained() plus the
# __call__ path returning context tensors.  Identifiers are machine obfuscated
# (SCREAMING_SNAKE_CASE_, UpperCAmelCase_), so many names no longer resolve
# even with line breaks restored -- TODO: recover the original file before
# running; kept byte-identical here to avoid corrupting it further.
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __snake_case ( __UpperCAmelCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = 8 # DPR tok SCREAMING_SNAKE_CASE_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'dpr_tokenizer') os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) # BART tok SCREAMING_SNAKE_CASE_ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) SCREAMING_SNAKE_CASE_ 
= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE_ = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'bart_tokenizer') os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(UpperCAmelCase_) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(UpperCAmelCase_)) def lowerCAmelCase__ ( self): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer')) def lowerCAmelCase__ ( self): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer')) def lowerCAmelCase__ ( self): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer')) def lowerCAmelCase__ ( self): shutil.rmtree(self.tmpdirname) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], }) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def lowerCAmelCase__ ( self , 
_A): SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , ) if from_disk: SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'dataset') SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'index.faiss') dataset.get_index('embeddings').save(os.path.join(self.tmpdirname , 'index.faiss')) dataset.drop_index('embeddings') dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset')) del dataset SCREAMING_SNAKE_CASE_ = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: SCREAMING_SNAKE_CASE_ = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_) , ) return retriever def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)], }) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index') dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr') pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb')) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl') SCREAMING_SNAKE_CASE_ = {sample['id']: [sample['text'], sample['title']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , 'wb')) SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , 
index_name='legacy' , index_path=self.tmpdirname , ) SCREAMING_SNAKE_CASE_ = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer()) return retriever def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ['embeddings', 'id', 'text', 'title']) self.assertEqual(len(doc_dicts[0]['id']) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]['id'][0] , '1') # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0') # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset: SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = np.array( 
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ['embeddings', 'id', 'text', 'title']) self.assertEqual(len(doc_dicts[0]['id']) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]['id'][0] , '1') # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0') # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ['embeddings', 'id', 'text', 'title']) self.assertEqual(len(doc_dicts[0]['id']) , UpperCAmelCase_) 
self.assertEqual(doc_dicts[0]['id'][0] , '1') # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0') # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever() SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ['text', 'title']) self.assertEqual(len(doc_dicts[0]['text']) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]['text'][0] , 'bar') # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['text'][0] , 'foo') # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , 
UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ = retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) @require_torch @require_tokenizers @require_sentencepiece def lowerCAmelCase__ ( self): import torch SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ( out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , np.ndarray) SCREAMING_SNAKE_CASE_ = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ( # noqa: F841 out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], out['doc_ids'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) @require_torch @require_tokenizers @require_sentencepiece def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer() SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 
self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_) SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) SCREAMING_SNAKE_CASE_ = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_) self.assertEqual( len(UpperCAmelCase_) , 6) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask')) , UpperCAmelCase_) # check for doc token related keys in dictionary.
711
def solution(pence: int = 200) -> int:
    """Return the number of ways ``pence`` can be made from UK coins.

    Project Euler problem 31 ("Coin sums"): count the combinations of
    1p, 2p, 5p, 10p, 20p, 50p, 100p (£1) and 200p (£2) coins whose
    values sum to ``pence``.

    Uses the standard bottom-up coin-change DP.  Processing one coin
    denomination at a time (outer loop over ``coins``) counts
    combinations rather than ordered permutations.

    :param pence: target amount in pence (non-negative)
    :return: number of distinct coin combinations summing to ``pence``

    >>> solution(5)
    4
    >>> solution(200)
    73682
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: exactly one way to make 0 pence (no coins)
    for coin in coins:
        # Starting the inner loop at ``coin`` keeps i - coin >= 0.
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


# Backward-compatible alias for the obfuscated name used elsewhere in this corpus.
_UpperCAmelCase = solution


if __name__ == "__main__":
    assert solution(200) == 73_682
    print(solution(200))
620
0
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 UpperCamelCase__ : int = sys.version_info >= (3, 10) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Optional[int]=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=_lowercase ) @dataclass class __snake_case : __lowerCAmelCase : int = 42 __lowerCAmelCase : str = 42 __lowerCAmelCase : List[Any] = 42 __lowerCAmelCase : Tuple = 42 @dataclass class __snake_case : __lowerCAmelCase : Dict = 42 __lowerCAmelCase : Any = field(default='toto' , metadata={'help': 'help message'} ) @dataclass class __snake_case : __lowerCAmelCase : Tuple = False __lowerCAmelCase : List[Any] = True __lowerCAmelCase : int = None class __snake_case ( UpperCamelCase_ ): __lowerCAmelCase : Dict = 'titi' __lowerCAmelCase : Union[str, Any] = 'toto' class __snake_case ( UpperCamelCase_ ): __lowerCAmelCase : Dict = 'titi' __lowerCAmelCase : Any = 'toto' __lowerCAmelCase : str = 42 @dataclass class __snake_case : __lowerCAmelCase : int = 'toto' def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BasicEnum(self.foo) @dataclass class __snake_case : __lowerCAmelCase : Dict = 'toto' def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = MixedTypeEnum(self.foo) @dataclass class __snake_case : __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : Union[str, Any] = field(default=UpperCamelCase_ , metadata={'help': 'help message'} ) __lowerCAmelCase : Dict = None __lowerCAmelCase : int = list_field(default=[] ) __lowerCAmelCase 
: Union[str, Any] = list_field(default=[] ) @dataclass class __snake_case : __lowerCAmelCase : Dict = list_field(default=[] ) __lowerCAmelCase : List[str] = list_field(default=[1, 2, 3] ) __lowerCAmelCase : Union[str, Any] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) __lowerCAmelCase : List[Any] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class __snake_case : __lowerCAmelCase : List[str] = field() __lowerCAmelCase : int = field() __lowerCAmelCase : Optional[int] = field() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BasicEnum(self.required_enum) @dataclass class __snake_case : __lowerCAmelCase : int = 42 __lowerCAmelCase : Optional[int] = field() __lowerCAmelCase : str = None __lowerCAmelCase : Optional[int] = field(default='toto' , metadata={'help': 'help message'} ) __lowerCAmelCase : List[Any] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class __snake_case : __lowerCAmelCase : int = False __lowerCAmelCase : str = True __lowerCAmelCase : str = None @dataclass class __snake_case : __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : int = field(default=UpperCamelCase_ , metadata={'help': 'help message'} ) __lowerCAmelCase : Any = None __lowerCAmelCase : Optional[Any] = list_field(default=[] ) __lowerCAmelCase : str = list_field(default=[] ) class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self , _A , _A): self.assertEqual(len(a._actions) , len(b._actions)) for x, y in zip(a._actions , b._actions): SCREAMING_SNAKE_CASE_ = {k: v for k, v in vars(UpperCamelCase__).items() if k != '''container'''} SCREAMING_SNAKE_CASE_ = {k: v for k, v in vars(UpperCamelCase__).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , UpperCamelCase__) and yy.get('choices' , UpperCamelCase__): for expected_choice in yy["choices"] + xx["choices"]: 
self.assertEqual(xx['type'](UpperCamelCase__) , yy['type'](UpperCamelCase__)) del xx["type"], yy["type"] self.assertEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCamelCase__ , required=UpperCamelCase__) expected.add_argument('--bar' , type=UpperCamelCase__ , required=UpperCamelCase__) expected.add_argument('--baz' , type=UpperCamelCase__ , required=UpperCamelCase__) expected.add_argument('--flag' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='?') self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] (SCREAMING_SNAKE_CASE_ ) = parser.parse_args_into_dataclasses(UpperCamelCase__ , look_for_args_file=UpperCamelCase__) self.assertFalse(example.flag) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=UpperCamelCase__) expected.add_argument('--baz' , default='toto' , type=UpperCamelCase__ , help='help message') self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='?') expected.add_argument('--baz' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='?') # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=UpperCamelCase__ , dest='baz') expected.add_argument('--opt' , type=UpperCamelCase__ , default=UpperCamelCase__) SCREAMING_SNAKE_CASE_ = 
[WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCamelCase__) for dataclass_type in dataclass_types: SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_args([]) self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__)) SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', '--no_baz']) self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__)) SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', '--baz']) self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__)) SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True']) self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__)) SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False']) self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , ) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_args([]) self.assertEqual(args.foo , 'toto') SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses([])[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto) SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', 'titi']) self.assertEqual(args.foo , 'titi') SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi) SCREAMING_SNAKE_CASE_ = 
parser.parse_args(['--foo', '42']) self.assertEqual(args.foo , 42) SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses(['--foo', '42'])[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo) def lowerCAmelCase__ ( self): @dataclass class __snake_case : __lowerCAmelCase : str = 'toto' SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , ) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_args([]) self.assertEqual(args.foo , 'toto') SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', 'titi']) self.assertEqual(args.foo , 'titi') SCREAMING_SNAKE_CASE_ = parser.parse_args(['--foo', '42']) self.assertEqual(args.foo , 42) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=UpperCamelCase__) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=UpperCamelCase__) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCamelCase__) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=UpperCamelCase__) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_args([]) self.assertEqual( UpperCamelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , ) SCREAMING_SNAKE_CASE_ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split()) self.assertEqual(UpperCamelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7])) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() 
expected.add_argument('--foo' , default=UpperCamelCase__ , type=UpperCamelCase__) expected.add_argument('--bar' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='help message') expected.add_argument('--baz' , default=UpperCamelCase__ , type=UpperCamelCase__) expected.add_argument('--ces' , nargs='+' , default=[] , type=UpperCamelCase__) expected.add_argument('--des' , nargs='+' , default=[] , type=UpperCamelCase__) SCREAMING_SNAKE_CASE_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCamelCase__) for dataclass_type in dataclass_types: SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_args([]) self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , bar=UpperCamelCase__ , baz=UpperCamelCase__ , ces=[] , des=[])) SCREAMING_SNAKE_CASE_ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split()) self.assertEqual(UpperCamelCase__ , Namespace(foo=12 , bar=3.1_4 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3])) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=UpperCamelCase__ , required=UpperCamelCase__) expected.add_argument('--required_str' , type=UpperCamelCase__ , required=UpperCamelCase__) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=UpperCamelCase__ , ) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCamelCase__ , required=UpperCamelCase__) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , 
required=UpperCamelCase__ , ) expected.add_argument('--opt' , type=UpperCamelCase__ , default=UpperCamelCase__) expected.add_argument('--baz' , default='toto' , type=UpperCamelCase__ , help='help message') expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCamelCase__) self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } SCREAMING_SNAKE_CASE_ = parser.parse_dict(UpperCamelCase__)[0] SCREAMING_SNAKE_CASE_ = BasicExample(**UpperCamelCase__) self.assertEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(UpperCamelCase__ , parser.parse_dict , UpperCamelCase__ , allow_extra_keys=UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = os.path.join(UpperCamelCase__ , 'temp_json') os.mkdir(UpperCamelCase__) with open(temp_local_path + '.json' , 'w+') as f: json.dump(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_yaml_file(Path(temp_local_path + '.json'))[0] SCREAMING_SNAKE_CASE_ = BasicExample(**UpperCamelCase__) self.assertEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) SCREAMING_SNAKE_CASE_ = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = os.path.join(UpperCamelCase__ , 'temp_yaml') 
os.mkdir(UpperCamelCase__) with open(temp_local_path + '.yaml' , 'w+') as f: yaml.dump(UpperCamelCase__ , UpperCamelCase__) SCREAMING_SNAKE_CASE_ = parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0] SCREAMING_SNAKE_CASE_ = BasicExample(**UpperCamelCase__) self.assertEqual(UpperCamelCase__ , UpperCamelCase__) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = HfArgumentParser(UpperCamelCase__) self.assertIsNotNone(UpperCamelCase__)
712
def knapsack(
    values: list, weights: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """Return the maximum total value for the 0/1 knapsack problem.

    Plain exhaustive recursion over the items: for each ``index`` the
    item is either skipped, or — if it still fits within the remaining
    capacity — taken, and the better of the two branches is returned.
    Exponential time; fine for the small inputs this script targets.

    :param values:          value of each item
    :param weights:         weight of each item (parallel to ``values``)
    :param number_of_items: total number of items under consideration
    :param max_weight:      remaining weight capacity
    :param index:            index of the item currently being decided

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    2
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    9
    """
    # All items decided: nothing more can be added.
    if index == number_of_items:
        return 0
    # Option 1: leave item ``index`` out.
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    # Option 2: take item ``index`` (only if it fits the remaining capacity).
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


# Backward-compatible alias for the obfuscated name used elsewhere in this corpus.
_UpperCAmelCase = knapsack


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
0
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = 
random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = BioGptModel(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = BioGptForCausalLM(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , 
token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = BioGptModel(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() # create attention mask SCREAMING_SNAKE_CASE_ = torch.ones(input_ids.shape , dtype=torch.long , device=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = self.seq_length // 2 SCREAMING_SNAKE_CASE_ = 0 # first forward pass SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 1) , config.vocab_size) # change a random masked slice from input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , _SCREAMING_SNAKE_CASE).item() + 1 SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1) SCREAMING_SNAKE_CASE_ = random_other_next_tokens # append to next input_ids and attn_mask SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE)] , dim=1 , ) # get two different outputs SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , atol=1E-3)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = BioGptModel(config=_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE).eval() SCREAMING_SNAKE_CASE_ = torch.ones(input_ids.shape , dtype=torch.long , device=_SCREAMING_SNAKE_CASE) # first forward pass SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size) SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , 2) # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = torch.cat([attention_mask, next_attn_mask] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE)[ 'last_hidden_state' ] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , *_A , _A=False): SCREAMING_SNAKE_CASE_ = BioGptForCausalLM(_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) if gradient_checkpointing: model.gradient_checkpointing_enable() SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , 
()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def lowerCAmelCase__ ( self , _A , *_A): SCREAMING_SNAKE_CASE_ = BioGptModel(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , *_A): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = BioGptForTokenClassification(_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __lowerCAmelCase : List[str] = (BioGptForCausalLM,) if is_torch_available() else () __lowerCAmelCase : int = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': 
BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase : List[Any] = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BioGptModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ = type self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*_SCREAMING_SNAKE_CASE , gradient_checkpointing=_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*_SCREAMING_SNAKE_CASE) @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt') model.to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = 
BioGptTokenizer.from_pretrained('microsoft/biogpt') SCREAMING_SNAKE_CASE_ = 'left' # Define PAD Token = EOS Token = 50256 SCREAMING_SNAKE_CASE_ = tokenizer.eos_token SCREAMING_SNAKE_CASE_ = model.config.eos_token_id # use different length sentences to test batching SCREAMING_SNAKE_CASE_ = [ 'Hello, my dog is a little', 'Today, I', ] SCREAMING_SNAKE_CASE_ = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='pt' , padding=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = inputs['input_ids'].to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = model.generate( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=inputs['attention_mask'].to(_SCREAMING_SNAKE_CASE) , ) SCREAMING_SNAKE_CASE_ = tokenizer(sentences[0] , return_tensors='pt').input_ids.to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = model.generate(input_ids=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() SCREAMING_SNAKE_CASE_ = tokenizer(sentences[1] , return_tensors='pt').input_ids.to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_length=model.config.max_length - num_paddings) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.assertListEqual(_SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence]) @slow def lowerCAmelCase__ ( self): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = BioGptModel.from_pretrained(_SCREAMING_SNAKE_CASE) 
self.assertIsNotNone(_SCREAMING_SNAKE_CASE) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1).to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = BioGptForSequenceClassification(_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 'multi_label_classification' SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1).to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) SCREAMING_SNAKE_CASE_ = BioGptForSequenceClassification(_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt') SCREAMING_SNAKE_CASE_ = torch.tensor([[2, 4805, 9, 656, 21]]) SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE)[0] SCREAMING_SNAKE_CASE_ = 42384 SCREAMING_SNAKE_CASE_ = torch.Size((1, 5, vocab_size)) 
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 10.4557], [-11.0469, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4)) @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BioGptTokenizer.from_pretrained('microsoft/biogpt') SCREAMING_SNAKE_CASE_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt') model.to(_SCREAMING_SNAKE_CASE) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = tokenizer('COVID-19 is' , return_tensors='pt').to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = model.generate( **_SCREAMING_SNAKE_CASE , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
713
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline __lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} __lowerCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_A) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , 
latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE_ = CLIPTextModel(_A) SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') SCREAMING_SNAKE_CASE_ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase__ ( self , _A , _A=0): SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A)).to(_A) SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(_A)).convert('RGB') if str(_A).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline(**_A) SCREAMING_SNAKE_CASE_ = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = sd_pipe(**_A).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5]) assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1E-3 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline(**_A) SCREAMING_SNAKE_CASE_ = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = """french fries""" SCREAMING_SNAKE_CASE_ = sd_pipe(**_A , negative_prompt=_A) SCREAMING_SNAKE_CASE_ = output.images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline(**_A) SCREAMING_SNAKE_CASE_ = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = [inputs["""prompt"""]] * 2 SCREAMING_SNAKE_CASE_ = np.array(inputs['image']).astype(np.floataa) / 255.0 SCREAMING_SNAKE_CASE_ = torch.from_numpy(_A).unsqueeze(0).to(_A) SCREAMING_SNAKE_CASE_ = image / 2 + 0.5 SCREAMING_SNAKE_CASE_ = image.permute(0 , 3 , 1 , 2) SCREAMING_SNAKE_CASE_ = image.repeat(2 , 1 , 1 , 1) SCREAMING_SNAKE_CASE_ = sd_pipe(**_A).images SCREAMING_SNAKE_CASE_ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """cpu""" # ensure determinism for the device-dependent 
torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear') SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline(**_A) SCREAMING_SNAKE_CASE_ = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = sd_pipe(**_A).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = [round(_A , 4) for x in image_slice.flatten().tolist()] print(','.join([str(_A) for x in slice])) assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowerCAmelCase__ ( self): super().test_inference_batch_single_identical(expected_max_diff=3E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline(**_A) SCREAMING_SNAKE_CASE_ = VaeImageProcessor(do_resize=_A , do_normalize=_A) SCREAMING_SNAKE_CASE_ = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs_by_type(_A , input_image_type='pt'))[0] SCREAMING_SNAKE_CASE_ = components["""vae"""] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs_by_type(_A , input_image_type='pt') for image_param in self.image_latents_params: if image_param in inputs.keys(): SCREAMING_SNAKE_CASE_ = vae.encode(inputs[image_param]).latent_dist.mode() SCREAMING_SNAKE_CASE_ = pipe(**_A)[0] SCREAMING_SNAKE_CASE_ = np.abs(out - out_latents_inputs).max() self.assertLess(_A , 1E-4 , 'passing latents as image input generate different result from passing image') @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def 
lowerCAmelCase__ ( self , _A=0): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A) SCREAMING_SNAKE_CASE_ = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg') SCREAMING_SNAKE_CASE_ = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_A) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = self.get_inputs() SCREAMING_SNAKE_CASE_ = pipe(**_A).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5]) assert np.abs(expected_slice - image_slice).max() < 1E-3 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_A) SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = self.get_inputs() SCREAMING_SNAKE_CASE_ = pipe(**_A).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1]) assert np.abs(expected_slice - image_slice).max() < 1E-3 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_A) SCREAMING_SNAKE_CASE_ = 
DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = self.get_inputs() SCREAMING_SNAKE_CASE_ = pipe(**_A).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3]) assert np.abs(expected_slice - image_slice).max() < 1E-3 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 0 def callback_fn(_A , _A , _A) -> None: SCREAMING_SNAKE_CASE_ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: SCREAMING_SNAKE_CASE_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE_ = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2 elif step == 2: SCREAMING_SNAKE_CASE_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE_ = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2 SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_A , torch_dtype=torch.floataa) SCREAMING_SNAKE_CASE_ = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = self.get_inputs() pipe(**_A , callback=_A , callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def lowerCAmelCase__ ( self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() 
SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_A , torch_dtype=torch.floataa) SCREAMING_SNAKE_CASE_ = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE_ = self.get_inputs() SCREAMING_SNAKE_CASE_ = pipe(**_A) SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE_ = inputs["""image"""].resize((504, 504)) SCREAMING_SNAKE_CASE_ = """timbrooks/instruct-pix2pix""" SCREAMING_SNAKE_CASE_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( _A , safety_checker=_A , ) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = pipe(**_A) SCREAMING_SNAKE_CASE_ = output.images[0] SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) SCREAMING_SNAKE_CASE_ = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
714
def _UpperCAmelCase ( ): """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= n: SCREAMING_SNAKE_CASE_ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _UpperCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 500 ) if __name__ == "__main__": print(solution())
620
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase__ : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase__ : Any = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase__ : List[Any] = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 
512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase__ : List[str] = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class __snake_case ( lowercase__ ): __lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES __lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Union[str, Any] = ElectraTokenizer def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ): super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('lowercase' , __lowercase) != do_lower_case or normalizer_state.get('strip_accents' , __lowercase) != strip_accents or normalizer_state.get('handle_chinese_chars' , __lowercase) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE_ = getattr(__lowercase , normalizer_state.pop('type')) SCREAMING_SNAKE_CASE_ = do_lower_case SCREAMING_SNAKE_CASE_ = strip_accents SCREAMING_SNAKE_CASE_ = tokenize_chinese_chars SCREAMING_SNAKE_CASE_ = normalizer_class(**__lowercase) SCREAMING_SNAKE_CASE_ = do_lower_case def lowerCAmelCase__ ( self , _A , _A=None): SCREAMING_SNAKE_CASE_ = 
[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(__lowercase , name=__lowercase) return tuple(__lowercase)
715
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
0
from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) # TODO: upload to AWS UpperCamelCase__ : List[str] = { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json""" ), } class __snake_case ( __snake_case ): __lowerCAmelCase : Any = 'retribert' def __init__( self , _A=30522 , _A=768 , _A=8 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=True , _A=128 , _A=0 , **_A , ): super().__init__(pad_token_id=A_ , **A_) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = share_encoders SCREAMING_SNAKE_CASE_ = projection_dim
716
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if 
self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
620
0
from argparse import ArgumentParser from . import BaseTransformersCLICommand def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class __snake_case ( UpperCamelCase__ ): @staticmethod def lowerCAmelCase__ ( _A): SCREAMING_SNAKE_CASE_ = parser.add_parser('download') download_parser.add_argument( '--cache-dir' , type=_A , default=_A , help='Path to location to store the models') download_parser.add_argument( '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir') download_parser.add_argument( '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , ) download_parser.add_argument('model' , type=_A , help='Name of the model to download') download_parser.set_defaults(func=_A) def __init__( self , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = model SCREAMING_SNAKE_CASE_ = cache SCREAMING_SNAKE_CASE_ = force SCREAMING_SNAKE_CASE_ = trust_remote_code def lowerCAmelCase__ ( self): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
717
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : torch.FloatTensor class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ): super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE_ = Encoder( in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , ) SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A) SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) # pass init params to Decoder SCREAMING_SNAKE_CASE_ = Decoder( in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , ) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = self.encoder(_A) SCREAMING_SNAKE_CASE_ = self.quant_conv(_A) if not return_dict: return (h,) return VQEncoderOutput(latents=_A) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = False , _A = True): # also go through quantization layer if not force_not_quantize: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A) else: SCREAMING_SNAKE_CASE_ = h SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A) 
SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=_A) def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = sample SCREAMING_SNAKE_CASE_ = self.encode(_A).latents SCREAMING_SNAKE_CASE_ = self.decode(_A).sample if not return_dict: return (dec,) return DecoderOutput(sample=_A)
620
0
from __future__ import annotations UpperCamelCase__ : List[str] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] UpperCamelCase__ : Optional[int] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = len(__lowerCAmelCase ) for i in range(__lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = -1 for j in range(i + 1 , __lowerCAmelCase ): if arr[i] < arr[j]: SCREAMING_SNAKE_CASE_ = arr[j] break result.append(__lowerCAmelCase ) return result def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for i, outer in enumerate(__lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = -1 for inner in arr[i + 1 :]: if outer < inner: SCREAMING_SNAKE_CASE_ = inner break result.append(__lowerCAmelCase ) return result def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = len(__lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [-1] * arr_size for index in reversed(range(__lowerCAmelCase ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: SCREAMING_SNAKE_CASE_ = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) UpperCamelCase__ : Optional[Any] = ( "from __main__ import arr, next_greatest_element_slow, " "next_greatest_element_fast, next_greatest_element" ) print( "next_greatest_element_slow():", timeit("next_greatest_element_slow(arr)", setup=setup), ) print( "next_greatest_element_fast():", timeit("next_greatest_element_fast(arr)", setup=setup), ) print( " next_greatest_element():", timeit("next_greatest_element(arr)", setup=setup), )
718
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCamelCase__ : Optional[int] = logging.getLogger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' 
) SCREAMING_SNAKE_CASE_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' ) SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' 
) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' 
) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) SCREAMING_SNAKE_CASE_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = special_dtypes SCREAMING_SNAKE_CASE_ = no_split_module_classes SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = max_memory SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. 
Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if modules_to_not_convert is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) SCREAMING_SNAKE_CASE_ = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" with init_empty_weights(): SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) 
else: SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE_ = False if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ): SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE_ = list(model.named_children() ) SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE_ = ['.weight', '.bias'] SCREAMING_SNAKE_CASE_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = param_name SCREAMING_SNAKE_CASE_ = model if "." 
in tensor_name: SCREAMING_SNAKE_CASE_ = tensor_name.split('.' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) SCREAMING_SNAKE_CASE_ = new_module SCREAMING_SNAKE_CASE_ = splits[-1] # offload weights SCREAMING_SNAKE_CASE_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
620
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ : Tuple = { "configuration_poolformer": [ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = ["PoolFormerFeatureExtractor"] UpperCamelCase__ : Tuple = ["PoolFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
719
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from every byte (0-255) to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted into the
    256+ range so the BPE vocabulary never contains whitespace/control chars.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; returns space-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # FIX: the sort key previously read `kv[1]` from a lambda whose
            # parameter was named differently, which raised NameError.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        # Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        # BART does not use token type ids: the mask is all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
620
0
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    # NOTE(review): the member names were stripped by obfuscation; restored the
    # canonical diffusers member set (values 1-5 preserved) — confirm upstream.
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    # Previous-timestep sample produced by a scheduler step.
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin providing save/load and compatibility discovery for flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a saved configuration."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Right-pad `x` with singleton axes, then broadcast it to `shape`."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Discretize the squared-cosine alpha-bar schedule into per-step betas."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t+1)/alpha_bar(t), clipped to avoid singularity.
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"""
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Return sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) broadcast to sample shape."""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Forward-diffuse `original_samples` to timestep `timesteps`."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """Compute the v-prediction target for the given sample/noise pair."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
720
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    """Configuration for DPR encoder/reader models (BERT-like backbone).

    FIX: the previous version rebound one local name instead of assigning
    `self.<attr>`, so no configuration values were ever stored.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection layer on top of pooled output.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
620
0
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c_i * x**i) directly from the coefficient sequence.

    FIX: the previous version never bound the names it later used
    (`evaluate_poly`/`horner`/`result`), so the module raised NameError.
    """
    return sum(coeff * (x**power) for power, coeff in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule (O(n), no pow calls)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
721
import pytest

import datasets

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark every test that is not already tagged "integration"/"unit" as a unit test.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all datasets caches into a per-session temp directory.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # NOTE(review): the patched value was lost in the obfuscated source;
    # False (do not phone home during tests) matches the fixture's intent — confirm upstream.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # NOTE(review): patched value was lost in the obfuscated source; True
    # (silence the SQLAlchemy 2.0 uber-warning) matches the fixture name — confirm upstream.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
0
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Raises ValueError for a negative limit.
    FIX: the previous version never bound `catalan_list` (all assignment
    targets were obfuscated), so indexing it raised NameError, and `__main__`
    called an undefined `catalan_numbers`.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
700
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
0
# Month "doomsday" anchor days (index 0 = January) for leap / non-leap years.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date using Conway's Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT leap when it is not divisible by 4, or when it is a century
    # year not divisible by 400.  FIX: the original used `(year % 400) == 0`
    # here, which wrongly classified e.g. 2000 as a non-leap year.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    """Configuration for BioGPT decoder-only language models.

    FIX: the previous version rebound a single local name instead of assigning
    `self.<attr>`, so no configuration values were ever stored on the instance.
    """

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Scale token embeddings by sqrt(hidden_size) when True.
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
620
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure; the tokenizer is only exposed when sentencepiece is installed.
# FIX: the previous version never populated/passed `_import_structure` and the
# runtime import targeted a digit-mangled module/class name
# (`tokenization_gpt_swa` / `GPTSwaTokenizer`) inconsistent with the declared
# public name `GPTSw3Tokenizer`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Class-conditional image generation with a DiT transformer + VAE decoder.

    FIX: restored coherent variable names (the obfuscated version rebound a
    single name everywhere and referenced undefined identifiers) and the
    digit-mangled identifiers (`TransformeraDModel` -> `Transformer2DModel`,
    `torch.floataa`/`torch.intaa` -> concrete dtypes).
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                # A class id may map to several comma-separated label strings.
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map one or more human-readable labels to class ids; raises ValueError for unknown labels."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."""
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the diffusion loop and decode latents; returns ImagePipelineOutput (or a tuple)."""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        # Duplicate latents for classifier-free guidance (cond + uncond halves).
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        # 1000 is the DiT "null" (unconditional) class id.
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Both halves must hold the same latents; re-sync from the first half.
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
0
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a reversible mapping from utf-8 byte values to printable unicode strings.

    Printable characters map to themselves; the remaining byte values are shifted
    past 2**8 so the BPE vocabulary never has to contain whitespace/control chars.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED models (same scheme as BART/GPT-2).

    A leading space is treated as part of the following word, so the same word
    is encoded differently at the start of a sentence and after a space.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base plus added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to ``token`` and return the space-joined symbols (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Always merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` with the GPT-2 regex and BPE-encode every piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the byte-to-unicode mapping to recover the original text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory`` and return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # was `lambda _A: kv[1]` in the corrupted copy — the key must bind the pair itself
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap the sequence(s) as ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return all-zero token type ids — LED does not use token types."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word tokenizes like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then bring ``global_attention_mask`` up to the padded length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
703
import pickle

import numpy as np


class CNN:
    """A small convolutional network trained with hand-written backpropagation.

    Layout: one convolution layer -> one average/max pooling layer -> flatten
    -> one hidden fully-connected layer -> one output fully-connected layer.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_count, step] of the convolution layer
        :param size_p1: pooling window size
        :param bp_num1: unit number of the flatten layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate for the weights
        :param rate_t: learning rate for the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # weights uniform in (-0.5, 0.5); thresholds uniform in (-1, 1)
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Serialize every model parameter to ``save_path`` with pickle."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a CNN instance from a file written by :meth:`save_model`.

        Only use on trusted files: unpickling can execute arbitrary code.
        """
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Element-wise logistic sigmoid."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round ``x`` to three decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve ``data`` with every kernel in ``w_convs``.

        Returns the flattened input slices and the list of sigmoid-activated
        feature maps (one square matrix per kernel).
        """
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        # (was `self.Expand_Mat` in the corrupted copy — no such method exists)
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Down-sample each feature map with average or max pooling."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into a single 1-D array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Flatten a single matrix to shape (1, rows*cols)."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample the pooled gradient and multiply by the sigmoid derivative."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train the network until ``n_repeat`` epochs or the MSE target is reached."""
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"""-------------Learning Time {rp}--------------""")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply((data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # Lazy import: matplotlib is only needed when a plot is requested.
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f""" - - Mse: {mse:.6f}"""))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on every test sample; outputs rounded to 3 decimals."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return the convolved and pooled representations of one image (for inspection)."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
620
0
from datetime import datetime

import torch


def freeze_module_params(module):
    """Disable gradient computation for every parameter of ``module`` (in place)."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Return the best available torch device string: ``"cuda"``, ``"mps"`` or ``"cpu"``.

    A warning is printed when MPS is selected, since backpropagation on MPS has
    been observed to misbehave without raising errors.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Render ``image`` with matplotlib, hiding both axes."""
    # Lazy import so the rest of the module works on headless machines.
    import matplotlib.pyplot as plt

    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current local time formatted as ``HH:MM:SS``."""
    current_time = datetime.now()
    return current_time.strftime("%H:%M:%S")
704
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the last ``num_runs`` scheduled (daily) CI workflow runs on ``main``.

    ``token`` is an optional GitHub token used to build the auth header.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or ``None``."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into ``output_dir``."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # keyword name (with its upstream typo) matches the helper's signature
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unzip the named artifacts; return ``{artifact: {filename: text}}``."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
620
0
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer UpperCamelCase__ : Union[str, Any] = "bart" UpperCamelCase__ : List[Any] = True @st.cache(allow_output_mutation=a_ ) def _UpperCAmelCase ( ): """simple docstring""" if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) SCREAMING_SNAKE_CASE_ = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) SCREAMING_SNAKE_CASE_ = qar_model.eval() else: SCREAMING_SNAKE_CASE_ = (None, None) if MODEL_TYPE == "bart": SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) SCREAMING_SNAKE_CASE_ = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) SCREAMING_SNAKE_CASE_ = sas_model.eval() else: SCREAMING_SNAKE_CASE_ = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=a_ ) def _UpperCAmelCase ( ): """simple docstring""" if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE_ = faiss.StandardGpuResources() SCREAMING_SNAKE_CASE_ = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['''train'''] SCREAMING_SNAKE_CASE_ = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) SCREAMING_SNAKE_CASE_ = faiss.IndexFlatIP(128 ) SCREAMING_SNAKE_CASE_ = faiss.index_cpu_to_gpu(a_ , 1 , a_ ) wikiaab_gpu_index_flat.add(a_ ) # TODO fix for larger GPU else: 
SCREAMING_SNAKE_CASE_ = (None, None) SCREAMING_SNAKE_CASE_ = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=a_ ) def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = datasets.load_dataset('eli5' , name='LFQA_reddit' ) SCREAMING_SNAKE_CASE_ = elia['''train_eli5'''] SCREAMING_SNAKE_CASE_ = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) SCREAMING_SNAKE_CASE_ = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(a_ ) return (elia_train, eli5_train_q_index) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = load_indexes() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = load_models() UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = load_train_data() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict=10 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = embed_questions_for_retrieval([question] , a_ , a_ ) SCREAMING_SNAKE_CASE_ = eli5_train_q_index.search(a_ , a_ ) SCREAMING_SNAKE_CASE_ = [elia_train[int(a_ )] for i in I[0]] return nn_examples def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any]="wiki40b" , _SCREAMING_SNAKE_CASE : Tuple="dense" , _SCREAMING_SNAKE_CASE : str=10 ): """simple docstring""" if source == "none": SCREAMING_SNAKE_CASE_ = (''' <P> '''.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": SCREAMING_SNAKE_CASE_ = query_qa_dense_index( a_ , a_ , a_ , a_ , a_ , a_ ) else: SCREAMING_SNAKE_CASE_ = query_es_index( a_ , a_ , index_name='english_wiki40b_snippets_100w' , n_results=a_ , ) SCREAMING_SNAKE_CASE_ = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] SCREAMING_SNAKE_CASE_ = '''question: {} context: {}'''.format(a_ , a_ ) return question_doc, 
support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None), } ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : List[str]=256 , _SCREAMING_SNAKE_CASE : Optional[Any]=False , _SCREAMING_SNAKE_CASE : Any=2 , _SCREAMING_SNAKE_CASE : Any=0.95 , _SCREAMING_SNAKE_CASE : List[Any]=0.8 ): """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE_ = qa_sas_generate( a_ , a_ , a_ , num_answers=1 , num_beams=a_ , min_len=a_ , max_len=a_ , do_sample=a_ , temp=a_ , top_p=a_ , top_k=a_ , max_input_length=1_024 , device='cuda:0' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar UpperCamelCase__ : int = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" UpperCamelCase__ : List[str] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia UpperCamelCase__ : Optional[Any] = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) UpperCamelCase__ : Optional[Any] = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 
question and answer", "Show me everything, please!", ] UpperCamelCase__ : List[Any] = st.sidebar.checkbox("Demo options") if demo_options: UpperCamelCase__ : Tuple = st.sidebar.selectbox( "", action_list, index=3, ) UpperCamelCase__ : Any = action_list.index(action_st) UpperCamelCase__ : str = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) UpperCamelCase__ : Optional[int] = show_type == "Show full text of passages" else: UpperCamelCase__ : Optional[int] = 3 UpperCamelCase__ : str = True UpperCamelCase__ : Optional[int] = st.sidebar.checkbox("Retrieval options") if retrieval_options: UpperCamelCase__ : Union[str, Any] = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) UpperCamelCase__ : Tuple = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) UpperCamelCase__ : List[Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: UpperCamelCase__ : List[str] = "wiki40b" UpperCamelCase__ : Optional[int] = "dense" UpperCamelCase__ : List[Any] = "beam" UpperCamelCase__ : Any = 2 UpperCamelCase__ : int = 64 UpperCamelCase__ : Any = 256 UpperCamelCase__ : Dict = None UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[Any] = st.sidebar.checkbox("Generation options") if generate_options: UpperCamelCase__ : int = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. 
You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) UpperCamelCase__ : int = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) UpperCamelCase__ : Union[str, Any] = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) UpperCamelCase__ : int = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": UpperCamelCase__ : Any = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: UpperCamelCase__ : Optional[int] = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) UpperCamelCase__ : str = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) UpperCamelCase__ : Union[str, Any] = None # start main text UpperCamelCase__ : str = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? 
Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] UpperCamelCase__ : List[Any] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": UpperCamelCase__ : Tuple = st.text_input("Enter your question here:", "") else: UpperCamelCase__ : Any = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": UpperCamelCase__ , UpperCamelCase__ : List[Any] = make_support(question, source=wiki_source, method="dense", n_results=10) UpperCamelCase__ , UpperCamelCase__ : List[str] = make_support(question, source=wiki_source, method="sparse", n_results=10) UpperCamelCase__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] UpperCamelCase__ : Optional[int] = support_list[:10] UpperCamelCase__ : Any = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: UpperCamelCase__ , UpperCamelCase__ : str = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: UpperCamelCase__ , UpperCamelCase__ : str = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): UpperCamelCase__ : List[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) UpperCamelCase__ : Any = res[1].strip() if sec_titles == "": UpperCamelCase__ : int = "[{}]({})".format(res[0], wiki_url) else: UpperCamelCase__ : Dict = sec_titles.split(" & ") UpperCamelCase__ 
: Union[str, Any] = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: UpperCamelCase__ : List[str] = find_nearest_training(question) UpperCamelCase__ : Dict = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) UpperCamelCase__ : Optional[int] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) UpperCamelCase__ : Any = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
705
# Lazy-import module stub for the MVP model family (HF Transformers style):
# heavy submodules are imported only when one of their names is first accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Mapping: submodule name -> public names it exports.
# NOTE(review): automated-rename damage in this module — every module-level
# name was flattened to ``UpperCamelCase__``, so each optional branch below
# REBINDS that name instead of adding a key to the import-structure dict,
# ``_LazyModule`` is later called with the never-defined ``_import_structure``,
# and the runtime-evaluated annotations (``Any``, ``Optional`` ...) are not
# imported.  The distinct original names must be restored before shipping.
UpperCamelCase__ : Any = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

# Fast tokenizer is exported only when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"]

# Modeling code is exported only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : str = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

# Static imports for type checkers only; at runtime the lazy proxy is used.
if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy.
    UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
# MBart-50 slow (SentencePiece-based) tokenizer module.
#
# NOTE(review): automated-rename damage throughout this file — every module
# constant is ``UpperCamelCase__`` (later assignments shadow earlier ones),
# every local is ``SCREAMING_SNAKE_CASE_``, every parameter is ``_A``, every
# method is ``lowerCAmelCase__``, and the base class is the unbound
# ``lowerCAmelCase__``.  Several names (``vocab_file``, ``kwargs``,
# ``tokens``, ``src_lang`` for the setter, ``Union`` ...) are referenced but
# never bound, and ``get_special_tokens_mask`` passes the keyword
# ``token_ids_a`` twice (a SyntaxError).  Comments below document evident
# intent; the original names must be restored before this module can run.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging

# Module logger.
UpperCamelCase__ : str = logging.get_logger(__name__)

# SentencePiece marker prefixed to word-initial pieces.
UpperCamelCase__ : List[Any] = """▁"""

# Expected vocabulary file name.
UpperCamelCase__ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}

# Download locations of the pretrained vocabularies.
UpperCamelCase__ : Any = {
    """vocab_file""": {
        """facebook/mbart-large-50-one-to-many-mmt""": (
            """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}

# Maximum model input sizes, keyed by checkpoint name.
UpperCamelCase__ : int = {
    """facebook/mbart-large-50-one-to-many-mmt""": 1_024,
}

# fmt: off
# The 52 fairseq language codes supported by mBART-50.
UpperCamelCase__ : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]


class __snake_case ( lowerCAmelCase__ ):
    """SentencePiece tokenizer for mBART-50 with fairseq-style id offsets and
    per-language special tokens (a source language code is prepended and the
    eos token appended to every encoded sequence)."""

    __lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
    __lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : Union[str, Any] = ["input_ids", "attention_mask"]
    __lowerCAmelCase : List[int] = []
    __lowerCAmelCase : List[int] = []

    def __init__( self , _A , _A=None , _A=None , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A = None , **_A , ):
        # Mask token behaves like a normal word (includes the preceding space).
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
        SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
        # Make every fairseq language code an additional special token.
        SCREAMING_SNAKE_CASE_ = kwargs.get('additional_special_tokens' , [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=_A , tgt_lang=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
        # Load the SentencePiece model from the vocab file on disk.
        SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(_A))
        SCREAMING_SNAKE_CASE_ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        SCREAMING_SNAKE_CASE_ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq
        # vocab and position 3 in the spm vocab.
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = len(self.sp_model)
        # Language codes live after the whole spm vocabulary (plus offset).
        SCREAMING_SNAKE_CASE_ = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A)
        }
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.lang_code_to_id.items()}
        SCREAMING_SNAKE_CASE_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        # Default source language is English.
        SCREAMING_SNAKE_CASE_ = src_lang if src_lang is not None else 'en_XX'
        SCREAMING_SNAKE_CASE_ = self.lang_code_to_id[self._src_lang]
        SCREAMING_SNAKE_CASE_ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def lowerCAmelCase__ ( self):
        # vocab_size: spm pieces + language codes + fairseq offset.
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def lowerCAmelCase__ ( self):
        # src_lang getter.
        return self._src_lang

    @src_lang.setter
    def lowerCAmelCase__ ( self , _A):
        # Changing src_lang refreshes the prefix/suffix special tokens.
        SCREAMING_SNAKE_CASE_ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__( self):
        # The C++ SentencePiece object is not picklable; drop it from state.
        SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
        SCREAMING_SNAKE_CASE_ = None
        return state

    def __setstate__( self , _A):
        # Rebuild the SentencePiece processor after unpickling.
        SCREAMING_SNAKE_CASE_ = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            SCREAMING_SNAKE_CASE_ = {}

        SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def lowerCAmelCase__ ( self):
        # get_vocab: full token -> id mapping including added tokens.
        SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def lowerCAmelCase__ ( self , _A):
        # _tokenize: delegate to SentencePiece.
        return self.sp_model.encode(_A , out_type=_A)

    def lowerCAmelCase__ ( self , _A):
        # _convert_token_to_id with the fairseq offset applied.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        SCREAMING_SNAKE_CASE_ = self.sp_model.PieceToId(_A)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def lowerCAmelCase__ ( self , _A):
        # _convert_id_to_token, undoing the fairseq offset.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def lowerCAmelCase__ ( self , _A):
        # convert_tokens_to_string: decode runs of ordinary pieces with spm,
        # emitting special tokens verbatim between them.
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = ''
        SCREAMING_SNAKE_CASE_ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_A) + token
                SCREAMING_SNAKE_CASE_ = True
                SCREAMING_SNAKE_CASE_ = []
            else:
                current_sub_tokens.append(_A)
                SCREAMING_SNAKE_CASE_ = False
        out_string += self.sp_model.decode(_A)
        return out_string.strip()

    def lowerCAmelCase__ ( self , _A , _A = None):
        # save_vocabulary: copy (or re-serialize) the spm model into save_directory.
        if not os.path.isdir(_A):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , _A)
        elif not os.path.isfile(self.vocab_file):
            with open(_A , 'wb') as fi:
                SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
                fi.write(_A)

        return (out_vocab_file,)

    def lowerCAmelCase__ ( self , _A , _A = None , _A = False):
        # get_special_tokens_mask: 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = [1] * len(self.prefix_tokens)
        SCREAMING_SNAKE_CASE_ = [1] * len(self.suffix_tokens)
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_A)) + suffix_ones
        return prefix_ones + ([0] * len(_A)) + ([0] * len(_A)) + suffix_ones

    def lowerCAmelCase__ ( self , _A , _A = None):
        # build_inputs_with_special_tokens: [src_lang_code] X [eos].
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , **_A):
        # _build_translation_inputs: encode text and attach forced BOS for tgt_lang.
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        SCREAMING_SNAKE_CASE_ = src_lang
        SCREAMING_SNAKE_CASE_ = self(_A , add_special_tokens=_A , return_tensors=_A , **_A)
        SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_A)
        SCREAMING_SNAKE_CASE_ = tgt_lang_id
        return inputs

    def lowerCAmelCase__ ( self , _A , _A = "en_XX" , _A = None , _A = "ro_RO" , **_A , ):
        # prepare_seq2seq_batch (name garbled to "seqaseq" by the rename pass).
        SCREAMING_SNAKE_CASE_ = src_lang
        SCREAMING_SNAKE_CASE_ = tgt_lang
        return super().prepare_seqaseq_batch(_A , _A , **_A)

    def lowerCAmelCase__ ( self):
        # _switch_to_input_mode.
        return self.set_src_lang_special_tokens(self.src_lang)

    def lowerCAmelCase__ ( self):
        # _switch_to_target_mode.
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def lowerCAmelCase__ ( self , _A):
        # set_src_lang_special_tokens: prefix=[src_lang_code], suffix=[eos].
        SCREAMING_SNAKE_CASE_ = self.lang_code_to_id[src_lang]
        SCREAMING_SNAKE_CASE_ = [self.cur_lang_code_id]
        SCREAMING_SNAKE_CASE_ = [self.eos_token_id]

    def lowerCAmelCase__ ( self , _A):
        # set_tgt_lang_special_tokens: prefix=[tgt_lang_code], suffix=[eos].
        SCREAMING_SNAKE_CASE_ = self.lang_code_to_id[tgt_lang]
        SCREAMING_SNAKE_CASE_ = [self.cur_lang_code_id]
        SCREAMING_SNAKE_CASE_ = [self.eos_token_id]
706
# Smoke tests for the `accelerate launch` / `accelerate tpu-config` CLIs.
#
# NOTE(review): automated-rename damage — class attributes that should have
# distinct names (mod_file, test_file_path, base_cmd, config_folder, ...)
# are all ``__lowerCAmelCase`` yet the bodies read the original names, every
# test method is ``lowerCAmelCase__`` (later defs shadow earlier ones, so
# unittest discovers only the last per class), and locals/params reference
# unbound ``cmd``/``_A``.  Original names must be restored before these run.
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class __snake_case ( unittest.TestCase ):
    # Path bookkeeping: locate the bundled test script and the user config file.
    __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
    __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCAmelCase : Tuple = ['accelerate', 'launch']
    __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCAmelCase : List[str] = 'default_config.yaml'
    __lowerCAmelCase : List[Any] = config_folder / config_file
    __lowerCAmelCase : str = config_folder / '_default_config.yaml'
    __lowerCAmelCase : Optional[int] = Path('tests/test_configs' )

    @classmethod
    def lowerCAmelCase__ ( cls):
        # setUpClass: park any pre-existing user config out of the way.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def lowerCAmelCase__ ( cls):
        # tearDownClass: restore the user's original config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def lowerCAmelCase__ ( self):
        # Launch with defaults; add --multi_gpu when more than one GPU is visible.
        SCREAMING_SNAKE_CASE_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # Launch once for every YAML config under tests/test_configs.
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=_A):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # `accelerate test` must run end to end.
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())


class __snake_case ( unittest.TestCase ):
    # Fixtures for `accelerate tpu-config`: every test asserts the gcloud
    # command line the CLI would run (captured via --debug) on stdout.
    __lowerCAmelCase : Optional[Any] = 'test-tpu'
    __lowerCAmelCase : str = 'us-central1-a'
    __lowerCAmelCase : Union[str, Any] = 'ls'
    __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
    __lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
    __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
    __lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'

    def lowerCAmelCase__ ( self):
        # Base case: command, zone and name given on the command line.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Old-style (0.12.0) config file plus explicit command/zone/name.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Latest config file alone: commands come from the config.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command on the CLI overrides the config's commands.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Multiple --command flags are run in order.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command_file: commands are read from a shell script.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command_file together with old-style config and explicit zone/name.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --install_accelerate prepends a pip install of the latest accelerate.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --accelerate_version pins the installed accelerate version.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
620
0
"""Unit tests for the Vector and Matrix helpers in ``.lib``."""
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __snake_case(unittest.TestCase):
    """Tests for ``.lib``'s Vector/Matrix API.

    Fixes over the previous revision:
      * every test method shared the single name ``lowerCAmelCase__``; inside
        a class body later ``def``s shadow earlier ones, so unittest only ever
        discovered and ran the *last* test.  Each method now has a unique
        ``test_*`` name so they all run.
      * the component read-back check passed ``0.01`` as the third positional
        argument of ``assertEqual`` — that slot is ``msg``, not a tolerance —
        so no approximate comparison happened.  It now uses
        ``assertAlmostEqual(..., delta=0.01)``.
    """

    def test_component(self) -> None:
        """component(i) returns coordinate i; empty construction must not raise."""
        vec = Vector([1, 2, 3])
        self.assertEqual(vec.component(0), 1)
        self.assertEqual(vec.component(2), 3)
        _ = Vector()  # constructing an empty vector is allowed

    def test_str(self) -> None:
        """str() renders the components comma-separated in parentheses."""
        vec = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(vec), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """len() reports the number of components."""
        vec = Vector([1, 2, 3, 4])
        self.assertEqual(len(vec), 4)

    def test_euclidean_length(self) -> None:
        """euclidean_length() is the 2-norm, checked to 3 decimal places."""
        self.assertAlmostEqual(Vector([1, 2]).euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(Vector([1, 2, 3, 4, 5]).euclidean_length(), 7.416, 3)
        self.assertEqual(Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).euclidean_length(), 0)
        self.assertAlmostEqual(Vector([1, -1, 1, -1, 2, -3, 4, -5]).euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """Vector addition is component-wise."""
        lhs = Vector([1, 2, 3])
        rhs = Vector([1, 1, 1])
        self.assertEqual((lhs + rhs).component(0), 2)
        self.assertEqual((lhs + rhs).component(1), 3)
        self.assertEqual((lhs + rhs).component(2), 4)

    def test_sub(self) -> None:
        """Vector subtraction is component-wise."""
        lhs = Vector([1, 2, 3])
        rhs = Vector([1, 1, 1])
        self.assertEqual((lhs - rhs).component(0), 0)
        self.assertEqual((lhs - rhs).component(1), 1)
        self.assertEqual((lhs - rhs).component(2), 2)

    def test_mul(self) -> None:
        """Scalar multiply scales every component; vector * vector is the dot product."""
        vec = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(vec * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual(a * b, 0)  # the fixture vectors are orthogonal

    def test_zero_vector(self) -> None:
        """zero_vector(n) has n zero components."""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """unit_basis_vector(n, i) is the standard basis vector e_i in R^n."""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """copy() yields a vector that renders identically."""
        vec = Vector([1, 0, 0, 0, 0, 0])
        clone = vec.copy()
        self.assertEqual(str(vec), str(clone))

    def test_change_component(self) -> None:
        """change_component(i, v) overwrites coordinate i in place."""
        vec = Vector([1, 0, 0])
        vec.change_component(0, 0)
        vec.change_component(1, 1)
        self.assertEqual(str(vec), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """str() renders each row as a |a,b,c| line."""
        mat = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(mat))

    def test_minor(self) -> None:
        """minor(x, y) matches the hand-computed minors of the fixture."""
        mat = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(mat.height()):
            for y in range(mat.width()):
                self.assertEqual(minors[x][y], mat.minor(x, y))

    def test_cofactor(self) -> None:
        """cofactor(x, y) matches the hand-computed cofactors of the fixture."""
        mat = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(mat.height()):
            for y in range(mat.width()):
                self.assertEqual(cofactors[x][y], mat.cofactor(x, y))

    def test_determinant(self) -> None:
        """determinant() of the fixture matrix is -5."""
        mat = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, mat.determinant())

    def test_mul_matrix(self) -> None:
        """Matrix * vector and matrix * scalar behave as expected."""
        mat = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        vec = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(mat * vec))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(mat * 2))

    def test_change_component_matrix(self) -> None:
        """change_component(x, y, v) overwrites one entry in place."""
        mat = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        mat.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(mat))

    def test_component_matrix(self) -> None:
        """component(x, y) reads one entry, compared with a 0.01 tolerance.

        Bug fix: previously ``assertEqual(7, ..., 0.01)`` passed 0.01 as the
        ``msg`` argument, performing an exact comparison.
        """
        mat = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(7, mat.component(2, 1), delta=0.01)

    def test_add_matrix(self) -> None:
        """Matrix addition is entry-wise."""
        lhs = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        rhs = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(lhs + rhs))

    def test_sub_matrix(self) -> None:
        """Matrix subtraction is entry-wise."""
        lhs = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        rhs = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(lhs - rhs))

    def test_square_zero_matrix(self) -> None:
        """square_zero_matrix(5) is a 5x5 all-zero matrix."""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
707
# Lazy-import module stub for TrOCR (HF Transformers style): heavy submodules
# are imported only when one of their names is first accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Mapping: submodule name -> public names it exports.
# NOTE(review): automated-rename damage — every module-level name was
# flattened to ``UpperCamelCase__``, so the torch branch below REBINDS that
# name instead of adding a key to the import-structure dict, ``_LazyModule``
# is later called with the never-defined ``_import_structure``, and the
# runtime-evaluated annotations (``Tuple``, ``str``) are not imported from
# typing.  The distinct original names must be restored before shipping.
UpperCamelCase__ : Tuple = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# Modeling code is exported only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Tuple = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

# Static imports for type checkers only; at runtime the lazy proxy is used.
if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy.
    UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
# Combined BridgeTower processor: one callable wrapping an image processor and
# a Roberta tokenizer, ProcessorMixin-style.
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __snake_case ( UpperCAmelCase_ ):
    """Wraps a BridgeTower image processor and a Roberta tokenizer behind a
    single ``__call__`` that returns one combined encoding.

    NOTE(review): automated-rename damage — every parameter is ``_A``
    (duplicated parameter names are a SyntaxError), the bodies forward the
    unbound ``_snake_case``, and the base class should presumably be
    ``ProcessorMixin`` rather than the undefined ``UpperCAmelCase_``.  The
    original parameter names must be restored before this class can run.
    """

    # ProcessorMixin wiring: attribute names and the classes they must hold.
    __lowerCAmelCase : Union[str, Any] = ["image_processor", "tokenizer"]
    __lowerCAmelCase : Optional[Any] = "BridgeTowerImageProcessor"
    __lowerCAmelCase : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__( self , _A , _A):
        # Presumably (image_processor, tokenizer), forwarded to the mixin.
        super().__init__(_snake_case , _snake_case)

    def __call__( self , _A , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ):
        # Tokenize the text with the full HF tokenizer kwargs surface.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
        # add pixel_values + pixel_mask
        SCREAMING_SNAKE_CASE_ = self.image_processor(
            _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case)
        encoding.update(_snake_case)

        return encoding

    def lowerCAmelCase__ ( self , *_A , **_A):
        # Forward to tokenizer.batch_decode.
        return self.tokenizer.batch_decode(*_snake_case , **_snake_case)

    def lowerCAmelCase__ ( self , *_A , **_A):
        # Forward to tokenizer.decode.
        return self.tokenizer.decode(*_snake_case , **_snake_case)

    @property
    def lowerCAmelCase__ ( self):
        # Union of tokenizer and image-processor input names, order-preserving
        # de-duplication via dict.fromkeys.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
708
# Parallel odd-even transposition sort: one process per list element, values
# exchanged with neighbors over multiprocessing Pipes.
#
# NOTE(review): automated-rename damage — the lock is bound to
# ``UpperCamelCase__`` but read as ``process_lock``; all three functions share
# the name ``_UpperCAmelCase`` (later defs shadow earlier ones); the worker's
# seven parameters are all spelled ``_SCREAMING_SNAKE_CASE`` (a duplicate-
# argument SyntaxError) while the body reads the originals (position, r_send,
# rr_cv, l_send, lr_cv, result_pipe ...); ``odd_even_transposition`` and
# ``main`` are called but never bound under those names; and the ``Union``/
# ``Dict``/... annotations are not imported.  Restore the original names
# before running.
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase__ : int = Lock()


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ):
    """Worker for one element: alternately swap with the right neighbor on
    even phases and the left neighbor on odd phases, then report the final
    value through ``result_pipe``."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(_SCREAMING_SNAKE_CASE )
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            SCREAMING_SNAKE_CASE_ = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            SCREAMING_SNAKE_CASE_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(_SCREAMING_SNAKE_CASE )
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            SCREAMING_SNAKE_CASE_ = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            SCREAMING_SNAKE_CASE_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(_SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """Sort ``arr`` by spawning one worker process per element, wiring
    adjacent workers together with Pipes, and collecting the results."""
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    SCREAMING_SNAKE_CASE_ = Pipe()
    SCREAMING_SNAKE_CASE_ = Pipe()
    process_array_.append(
        Process(
            target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    SCREAMING_SNAKE_CASE_ = temp_rs
    SCREAMING_SNAKE_CASE_ = temp_rr

    for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
        SCREAMING_SNAKE_CASE_ = Pipe()
        SCREAMING_SNAKE_CASE_ = Pipe()
        process_array_.append(
            Process(
                target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        SCREAMING_SNAKE_CASE_ = temp_rs
        SCREAMING_SNAKE_CASE_ = temp_rr

    process_array_.append(
        Process(
            target=_SCREAMING_SNAKE_CASE , args=(
                len(_SCREAMING_SNAKE_CASE ) - 1,
                arr[len(_SCREAMING_SNAKE_CASE ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1],
            ) , ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ):
        SCREAMING_SNAKE_CASE_ = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def _UpperCAmelCase ( ):
    """Demo entry point: sort a reversed 10..1 list and print before/after."""
    SCREAMING_SNAKE_CASE_ = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = odd_even_transposition(_SCREAMING_SNAKE_CASE )
    print('Sorted List\n' )
    print(*_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    main()
620
0
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __snake_case ( _lowerCAmelCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = 5 # Realm tok SCREAMING_SNAKE_CASE_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'test', 'question', 'this', 'is', 'the', 'first', 'second', 'third', 'fourth', 'fifth', 'record', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'realm_tokenizer') os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase) SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'realm_block_records') os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase) def lowerCAmelCase__ ( self): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer')) def lowerCAmelCase__ ( self): shutil.rmtree(self.tmpdirname) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = RealmConfig(num_block_records=self.num_block_records) return config def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { 'id': ['0', '1'], 'question': ['foo', 'bar'], 'answers': [['Foo', 'Bar'], ['Bar']], }) return dataset def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = np.array( [ b'This is the first record', b'This is the second record', b'This is the third record', b'This is the fourth record', b'This is the fifth record', b'This is a 
longer longer longer record', ] , dtype=_lowerCAmelCase , ) return block_records def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_config() SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever() SCREAMING_SNAKE_CASE_ = retriever.tokenizer SCREAMING_SNAKE_CASE_ = np.array([0, 3] , dtype='long') SCREAMING_SNAKE_CASE_ = tokenizer(['Test question']).input_ids SCREAMING_SNAKE_CASE_ = tokenizer( ['the fourth'] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids SCREAMING_SNAKE_CASE_ = config.reader_seq_len SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever( _lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np') self.assertEqual(len(_lowerCAmelCase) , 2) self.assertEqual(len(_lowerCAmelCase) , 2) self.assertEqual(len(_lowerCAmelCase) , 2) self.assertEqual(concat_inputs.input_ids.shape , (2, 10)) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10)) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10)) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10)) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_config() SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever() SCREAMING_SNAKE_CASE_ = retriever.tokenizer SCREAMING_SNAKE_CASE_ = np.array([0, 3, 5] , dtype='long') SCREAMING_SNAKE_CASE_ = tokenizer(['Test question']).input_ids 
SCREAMING_SNAKE_CASE_ = tokenizer( ['the fourth', 'longer longer'] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids SCREAMING_SNAKE_CASE_ = config.reader_seq_len SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever( _lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np') self.assertEqual([False, True, True] , _lowerCAmelCase) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCAmelCase) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCAmelCase) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records')) # Test local path SCREAMING_SNAKE_CASE_ = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records')) self.assertEqual(retriever.block_records[0] , b'This is the first record') # Test mocked remote path with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download: SCREAMING_SNAKE_CASE_ = os.path.join( os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME) SCREAMING_SNAKE_CASE_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa') self.assertEqual(retriever.block_records[0] , b'This is the first record')
709
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering') self.tool.setup() SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering' , remote=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop')
620
0
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = os.path.join(args.tf_model_dir , 'parameters.json' ) SCREAMING_SNAKE_CASE_ = json.loads(open(snake_case_ ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith('.pt' ): SCREAMING_SNAKE_CASE_ = args.output + """.pt""" SCREAMING_SNAKE_CASE_ = OrderedDict() with tf.device('/CPU:0' ): SCREAMING_SNAKE_CASE_ = tf.train.load_checkpoint(args.tf_model_dir ) SCREAMING_SNAKE_CASE_ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): SCREAMING_SNAKE_CASE_ = reader.get_tensor(snake_case_ ).astype(np.floataa ) if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ): continue if key_name.startswith('pasts/' ): if key_name.startswith('pasts/mlp' ): SCREAMING_SNAKE_CASE_ = int(key_name[9] ) elif key_name.startswith('pasts/out' ): SCREAMING_SNAKE_CASE_ = 8 SCREAMING_SNAKE_CASE_ = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time SCREAMING_SNAKE_CASE_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.startswith('model/moe' ): SCREAMING_SNAKE_CASE_ = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/switch_gating/kernel' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player SCREAMING_SNAKE_CASE_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/softmlp/kernel' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player SCREAMING_SNAKE_CASE_ = vnp.transpose([1, 0] ).copy() # 
Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ): SCREAMING_SNAKE_CASE_ = key_name[-9:-7] for i in range(16 ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) SCREAMING_SNAKE_CASE_ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.startswith('model/mlp' ): SCREAMING_SNAKE_CASE_ = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/p1/kernel' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player SCREAMING_SNAKE_CASE_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/p1/bias' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/p2/kernel' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player SCREAMING_SNAKE_CASE_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/p2/bias' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.startswith('model/ln' ): SCREAMING_SNAKE_CASE_ = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.feed_forward.norm.bias""" % player SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/g' ): SCREAMING_SNAKE_CASE_ = 
"""model.blocks.%d.feed_forward.norm.weight""" % player SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.startswith('model/att' ): SCREAMING_SNAKE_CASE_ = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/qkv/kernel' ): SCREAMING_SNAKE_CASE_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum SCREAMING_SNAKE_CASE_ = state[:, 0, :, :] SCREAMING_SNAKE_CASE_ = state[:, 1, :, :] SCREAMING_SNAKE_CASE_ = state[:, 2, :, :] SCREAMING_SNAKE_CASE_ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) SCREAMING_SNAKE_CASE_ = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) SCREAMING_SNAKE_CASE_ = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/o/kernel' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player SCREAMING_SNAKE_CASE_ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.startswith('model/an' ): SCREAMING_SNAKE_CASE_ = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.self_attn.norm.bias""" % player 
SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.endswith('/g' ): SCREAMING_SNAKE_CASE_ = """model.blocks.%d.self_attn.norm.weight""" % player SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif ( key_name.startswith('model/wte' ) or key_name.startswith('model/wpe' ) or key_name.startswith('model/ete' ) ): SCREAMING_SNAKE_CASE_ = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] SCREAMING_SNAKE_CASE_ = """model.%s.weight""" % nlayer SCREAMING_SNAKE_CASE_ = vnp.copy() # same in embedded SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) if key_name.startswith('model/wte' ): SCREAMING_SNAKE_CASE_ = """lm_head.weight""" SCREAMING_SNAKE_CASE_ = vnp.copy() # same in embedded SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name.startswith('model/wob' ): SCREAMING_SNAKE_CASE_ = """final_logits_bias""" SCREAMING_SNAKE_CASE_ = vnp.copy() # same in embedded SCREAMING_SNAKE_CASE_ = state.reshape((1, -1) ) SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name == "model/dense/kernel": SCREAMING_SNAKE_CASE_ = """model.last_project.weight""" SCREAMING_SNAKE_CASE_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) elif key_name == "model/dense_1/bias": SCREAMING_SNAKE_CASE_ = """model.last_project.bias""" SCREAMING_SNAKE_CASE_ = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ = torch.tensor(snake_case_ ) torch.save(snake_case_ , args.output ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") 
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") UpperCamelCase__ : Dict = parser.parse_args() convert_tf_gptsan_to_pt(args)
710
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
0
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast UpperCamelCase__ : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : List[Any] = 10_000 __lowerCAmelCase : Any = None __lowerCAmelCase : List[str] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = ParquetConfig def lowerCAmelCase__ ( self): return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCamelCase__ , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(UpperCamelCase__ , UpperCamelCase__): SCREAMING_SNAKE_CASE_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(UpperCamelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in data_files.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__): SCREAMING_SNAKE_CASE_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(UpperCamelCase__) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(UpperCamelCase__): with open(UpperCamelCase__ , 'rb') as f: SCREAMING_SNAKE_CASE_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__)) break splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={'files': files})) return splits 
def lowerCAmelCase__ ( self , _A): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""") for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__)): with open(UpperCamelCase__ , 'rb') as f: SCREAMING_SNAKE_CASE_ = pq.ParquetFile(UpperCamelCase__) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): SCREAMING_SNAKE_CASE_ = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCamelCase__)}: {e}""") raise
711
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
620
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Optional[Any] = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[Any] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
712
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """Solve the 0/1 knapsack problem by plain recursion.

    Returns the maximum total value achievable from items
    ``index .. number_of_items - 1`` without exceeding ``max_weight``.

    Bug fixed: the obfuscated original named the function ``_UpperCAmelCase``
    while its body recursed via the undefined name ``knapsack``, so any call
    raised NameError. The working name is restored.

    :param weights: item weights, parallel to ``values``
    :param values: item values, parallel to ``weights``
    :param number_of_items: total number of items (len of the lists)
    :param max_weight: remaining weight capacity
    :param index: index of the item currently being considered
    :return: best achievable value from the remaining items
    """
    # All items considered: nothing more can be added.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    best_without = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, only if it fits the remaining capacity.
    best_with = 0
    if weights[index] <= max_weight:
        best_with = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(best_without, best_with)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
0
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" with open(__snake_case ) as metadata_file: SCREAMING_SNAKE_CASE_ = json.load(__snake_case ) SCREAMING_SNAKE_CASE_ = LukeConfig(use_entity_aware_attention=__snake_case , **metadata['model_config'] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE_ = torch.load(__snake_case , map_location='cpu' )["module"] # Load the entity vocab file SCREAMING_SNAKE_CASE_ = load_original_entity_vocab(__snake_case ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE_ = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE_ = AddedToken('<ent>' , lstrip=__snake_case , rstrip=__snake_case ) SCREAMING_SNAKE_CASE_ = AddedToken('<ent2>' , lstrip=__snake_case , rstrip=__snake_case ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(__snake_case ) with open(os.path.join(__snake_case , 'tokenizer_config.json' ) , 'r' ) as f: SCREAMING_SNAKE_CASE_ = json.load(__snake_case ) SCREAMING_SNAKE_CASE_ = "MLukeTokenizer" with open(os.path.join(__snake_case , 'tokenizer_config.json' ) , 'w' ) as f: json.dump(__snake_case , __snake_case ) with open(os.path.join(__snake_case , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(__snake_case , 
__snake_case ) SCREAMING_SNAKE_CASE_ = MLukeTokenizer.from_pretrained(__snake_case ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(['@'] )[0] SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(['#'] )[0] SCREAMING_SNAKE_CASE_ = state_dict["embeddings.word_embeddings.weight"] SCREAMING_SNAKE_CASE_ = word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE_ = word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE_ = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE_ = state_dict[bias_name] SCREAMING_SNAKE_CASE_ = decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE_ = decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE_ = f"""encoder.layer.{layer_index}.attention.self.""" SCREAMING_SNAKE_CASE_ = state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE_ = state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE_ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE_ = state_dict["entity_embeddings.entity_embeddings.weight"] SCREAMING_SNAKE_CASE_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) SCREAMING_SNAKE_CASE_ = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE_ = state_dict["entity_predictions.bias"] SCREAMING_SNAKE_CASE_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) SCREAMING_SNAKE_CASE_ = torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE_ = LukeForMaskedLM(config=__snake_case ).eval() 
state_dict.pop('entity_predictions.decoder.weight' ) state_dict.pop('lm_head.decoder.weight' ) state_dict.pop('lm_head.decoder.bias' ) SCREAMING_SNAKE_CASE_ = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )): SCREAMING_SNAKE_CASE_ = state_dict[key] else: SCREAMING_SNAKE_CASE_ = state_dict[key] SCREAMING_SNAKE_CASE_ = model.load_state_dict(__snake_case , strict=__snake_case ) if set(__snake_case ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(__snake_case ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE_ = MLukeTokenizer.from_pretrained(__snake_case , task='entity_classification' ) SCREAMING_SNAKE_CASE_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." 
SCREAMING_SNAKE_CASE_ = (0, 9) SCREAMING_SNAKE_CASE_ = tokenizer(__snake_case , entity_spans=[span] , return_tensors='pt' ) SCREAMING_SNAKE_CASE_ = model(**__snake_case ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE_ = torch.Size((1, 33, 768) ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE_ = torch.Size((1, 1, 768) ) SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE_ = MLukeTokenizer.from_pretrained(__snake_case ) SCREAMING_SNAKE_CASE_ = "Tokyo is the capital of <mask>." 
SCREAMING_SNAKE_CASE_ = (24, 30) SCREAMING_SNAKE_CASE_ = tokenizer(__snake_case , entity_spans=[span] , return_tensors='pt' ) SCREAMING_SNAKE_CASE_ = model(**__snake_case ) SCREAMING_SNAKE_CASE_ = encoding["input_ids"][0].tolist() SCREAMING_SNAKE_CASE_ = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) ) SCREAMING_SNAKE_CASE_ = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(__snake_case ) SCREAMING_SNAKE_CASE_ = outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE_ = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(__snake_case ) ) model.save_pretrained(__snake_case ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ["[MASK]", "[PAD]", "[UNK]"] SCREAMING_SNAKE_CASE_ = [json.loads(__snake_case ) for line in open(__snake_case )] SCREAMING_SNAKE_CASE_ = {} for entry in data: SCREAMING_SNAKE_CASE_ = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE_ = entity_id break SCREAMING_SNAKE_CASE_ = f"""{language}:{entity_name}""" SCREAMING_SNAKE_CASE_ = entity_id return new_mapping if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." 
) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) UpperCamelCase__ : Tuple = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
713
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
0
'''simple docstring''' import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCamelCase__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" for pegasus_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE_ = k.replace(a__ , a__ ) return k def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = DEFAULTS.copy() cfg_kwargs.update(a__ ) SCREAMING_SNAKE_CASE_ = PegasusConfig(**a__ ) SCREAMING_SNAKE_CASE_ = PegasusForConditionalGeneration(a__ ) SCREAMING_SNAKE_CASE_ = torch_model.model.state_dict() SCREAMING_SNAKE_CASE_ = {} for k, v in tf_weights.items(): SCREAMING_SNAKE_CASE_ = rename_state_dict_key(a__ ) if new_k not in sd: raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if "dense" in k or "proj" in new_k: SCREAMING_SNAKE_CASE_ = v.T SCREAMING_SNAKE_CASE_ = torch.tensor(a__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected SCREAMING_SNAKE_CASE_ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) SCREAMING_SNAKE_CASE_ = mapping['shared.weight'] SCREAMING_SNAKE_CASE_ = mapping['shared.weight'] SCREAMING_SNAKE_CASE_ = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**a__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch_model.model.load_state_dict(a__ , strict=a__ ) SCREAMING_SNAKE_CASE_ = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple="./ckpt/aeslc/model.ckpt-32000" ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tf.train.list_variables(a__ ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = ['Adafactor', 'global_step'] for name, shape in tqdm(a__ , desc='converting tf checkpoint to dict' ): SCREAMING_SNAKE_CASE_ = any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE_ = tf.train.load_variable(a__ , a__ ) SCREAMING_SNAKE_CASE_ = array return tf_weights def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Path(a__ ).parent.name SCREAMING_SNAKE_CASE_ = task_specific_params[f"""summarization_{dataset}"""]['max_position_embeddings'] SCREAMING_SNAKE_CASE_ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=a__ ) assert tok.model_max_length == desired_max_model_length 
tok.save_pretrained(a__ ) # convert model SCREAMING_SNAKE_CASE_ = get_tf_weights_as_numpy(a__ ) SCREAMING_SNAKE_CASE_ = task_specific_params[f"""summarization_{dataset}"""] if dataset == "large": SCREAMING_SNAKE_CASE_ = task_specific_params SCREAMING_SNAKE_CASE_ = convert_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) SCREAMING_SNAKE_CASE_ = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(a__ , Path(a__ ) / 'pytorch_model.bin' ) if __name__ == "__main__": UpperCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.") UpperCamelCase__ : Dict = parser.parse_args() if args.save_dir is None: UpperCamelCase__ : Tuple = Path(args.tf_ckpt_path).parent.name UpperCamelCase__ : Tuple = os.path.join("pegasus", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
714
def triangle_number_generator():
    """Yield triangle numbers T(n) = n*(n+1)//2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of ``n``.

    Uses trial-division prime factorisation: for n = p1^a1 * ... * pk^ak the
    divisor count is (a1+1) * ... * (ak+1). Runs in O(sqrt(n)).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # A remaining factor > sqrt(original n) is prime with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors.

    Bug fixed: the obfuscated original gave all three functions the same name
    ``_UpperCAmelCase`` (each def shadowing the previous) and called the
    undefined names ``triangle_number_generator``, ``count_divisors`` and
    ``solution``; the module raised NameError. Proper names are restored.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
620
0
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __snake_case ( UpperCamelCase__ ): def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=64 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , _A=2 , _A=2 , _A=2 , _A=2 , _A=4 , _A=1 , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = q_groups SCREAMING_SNAKE_CASE_ = k_groups SCREAMING_SNAKE_CASE_ = v_groups SCREAMING_SNAKE_CASE_ = post_attention_groups 
SCREAMING_SNAKE_CASE_ = intermediate_groups SCREAMING_SNAKE_CASE_ = output_groups def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self): return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = SqueezeBertModel(config=__A) model.to(__A) model.eval() SCREAMING_SNAKE_CASE_ = model(__A , __A) SCREAMING_SNAKE_CASE_ = model(__A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = SqueezeBertForMaskedLM(config=__A) 
model.to(__A) model.eval() SCREAMING_SNAKE_CASE_ = model(__A , attention_mask=__A , labels=__A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = SqueezeBertForQuestionAnswering(config=__A) model.to(__A) model.eval() SCREAMING_SNAKE_CASE_ = model( __A , attention_mask=__A , start_positions=__A , end_positions=__A) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = SqueezeBertForSequenceClassification(__A) model.to(__A) model.eval() SCREAMING_SNAKE_CASE_ = model(__A , attention_mask=__A , labels=__A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = SqueezeBertForTokenClassification(config=__A) model.to(__A) model.eval() SCREAMING_SNAKE_CASE_ = model(__A , attention_mask=__A , labels=__A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = self.num_choices SCREAMING_SNAKE_CASE_ = SqueezeBertForMultipleChoice(config=__A) model.to(__A) model.eval() SCREAMING_SNAKE_CASE_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ = model( __A , attention_mask=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE_) , 
(SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __snake_case ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): __lowerCAmelCase : List[Any] = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __lowerCAmelCase : Tuple = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase : int = False __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = SqueezeBertModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__A , dim=37) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__A) def lowerCAmelCase__ ( self): 
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__A) @slow def lowerCAmelCase__ ( self): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = SqueezeBertModel.from_pretrained(__A) self.assertIsNotNone(__A) @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli') SCREAMING_SNAKE_CASE_ = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]]) SCREAMING_SNAKE_CASE_ = model(__A)[0] SCREAMING_SNAKE_CASE_ = torch.Size((1, 3)) self.assertEqual(output.shape , __A) SCREAMING_SNAKE_CASE_ = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]]) self.assertTrue(torch.allclose(__A , __A , atol=1E-4))
715
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for the JSON loader.

    Attributes carry the user-tunable knobs; `use_threads` and `block_size`
    are kept only for backward compatibility and are deprecated.
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads JSON / JSON-lines files into pyarrow tables."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            # Honor the deprecated option by forwarding it to its replacement.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # A bare path or list of paths: everything goes to the train split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a raw pyarrow table to the features declared in the config, if any."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs from each input file.

        Two layouts are supported: a single JSON document with the records under
        `config.field`, or JSON-lines parsed in `chunksize`-byte batches via pyarrow.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
620
0
import unittest

import numpy as np

from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    """Holds the configuration the image-processor tests are asserted against."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        # Keyword arguments used to instantiate BeitImageProcessor in the tests below.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Return one (image, segmentation map) pair from the ADE20k fixtures."""
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])

    return image, map


def prepare_semantic_batch_inputs():
    """Return a batch of two (image, segmentation map) pairs from the ADE20k fixtures."""
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    imagea = Image.open(ds[0]['file'])
    imageb = Image.open(ds[1]['file'])
    mapa = Image.open(ds[2]['file'])
    mapb = Image.open(ds[3]['file'])

    return [imagea, imageb], [mapa, mapb]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests BeitImageProcessor on PIL, numpy and torch inputs, with and without segmentation maps."""

    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                2,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        # With reduce_labels enabled the background (0) is mapped to the ignore index 255.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
716
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds configs/inputs for the standalone TrOCR decoder and runs shared checks."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for the decoder tests."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches a full forward pass."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs['past_key_values']

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip('The model doesn\'t support left padding')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
0
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __snake_case ( UpperCamelCase_ ): def __get__( self , _A , _A=None): if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute') SCREAMING_SNAKE_CASE_ = '__cached_' + self.fget.__name__ SCREAMING_SNAKE_CASE_ = getattr(__a , __a , __a) if cached is None: SCREAMING_SNAKE_CASE_ = self.fget(__a) setattr(__a , __a , __a) return cached def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"""invalid truth value {val!r}""" ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if is_torch_fx_proxy(_SCREAMING_SNAKE_CASE ): return True if is_torch_available(): import torch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_SCREAMING_SNAKE_CASE , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_SCREAMING_SNAKE_CASE , (jnp.ndarray, Tracer) ): return True return isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" return isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" return _is_numpy(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple 
docstring""" import torch return isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" return False if not is_torch_available() else _is_torch(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" import torch return isinstance(_SCREAMING_SNAKE_CASE , torch.device ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" return False if not is_torch_available() else _is_torch_device(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" import torch if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: return False return isinstance(_SCREAMING_SNAKE_CASE , torch.dtype ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" import tensorflow as tf return isinstance(_SCREAMING_SNAKE_CASE , tf.Tensor ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" return False if not is_tf_available() else _is_tensorflow(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_SCREAMING_SNAKE_CASE , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(_SCREAMING_SNAKE_CASE ) return type(_SCREAMING_SNAKE_CASE ) == tf.Tensor def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple 
docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" return False if not is_flax_available() else _is_jax(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , (dict, UserDict) ): return {k: to_py_obj(_SCREAMING_SNAKE_CASE ) for k, v in obj.items()} elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): return [to_py_obj(_SCREAMING_SNAKE_CASE ) for o in obj] elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): return obj.numpy().tolist() elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return obj.detach().cpu().tolist() elif is_jax_tensor(_SCREAMING_SNAKE_CASE ): return np.asarray(_SCREAMING_SNAKE_CASE ).tolist() elif isinstance(_SCREAMING_SNAKE_CASE , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , (dict, UserDict) ): return {k: to_numpy(_SCREAMING_SNAKE_CASE ) for k, v in obj.items()} elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): return np.array(_SCREAMING_SNAKE_CASE ) elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): return obj.numpy() elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return obj.detach().cpu().numpy() elif is_jax_tensor(_SCREAMING_SNAKE_CASE ): return np.asarray(_SCREAMING_SNAKE_CASE ) else: return obj class __snake_case ( UpperCamelCase_ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = fields(self) # Safety and consistency checks if not len(__a): raise ValueError(f"""{self.__class__.__name__} has no fields.""") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""") SCREAMING_SNAKE_CASE_ = getattr(self , class_fields[0].name) SCREAMING_SNAKE_CASE_ = 
all(getattr(self , field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(__a): if isinstance(__a , __a): SCREAMING_SNAKE_CASE_ = first_field.items() SCREAMING_SNAKE_CASE_ = True else: try: SCREAMING_SNAKE_CASE_ = iter(__a) SCREAMING_SNAKE_CASE_ = True except TypeError: SCREAMING_SNAKE_CASE_ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__a): if ( not isinstance(__a , (list, tuple)) or not len(__a) == 2 or not isinstance(element[0] , __a) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute SCREAMING_SNAKE_CASE_ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""") break setattr(self , element[0] , element[1]) if element[1] is not None: SCREAMING_SNAKE_CASE_ = element[1] elif first_field is not None: SCREAMING_SNAKE_CASE_ = first_field else: for field in class_fields: SCREAMING_SNAKE_CASE_ = getattr(self , field.name) if v is not None: SCREAMING_SNAKE_CASE_ = v def __delitem__( self , *_A , **_A): raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""") def lowerCAmelCase__ ( self , *_A , **_A): raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""") def lowerCAmelCase__ ( self , *_A , **_A): raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""") def lowerCAmelCase__ ( self , *_A , **_A): raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""") def __getitem__( self , _A): if isinstance(__a , __a): SCREAMING_SNAKE_CASE_ = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , _A , _A): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid 
recursion errors super().__setitem__(__a , __a) super().__setattr__(__a , __a) def __setitem__( self , _A , _A): super().__setitem__(__a , __a) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__a , __a) def lowerCAmelCase__ ( self): return tuple(self[k] for k in self.keys()) class __snake_case ( UpperCamelCase_ , UpperCamelCase_ ): @classmethod def lowerCAmelCase__ ( cls , _A): raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}""") class __snake_case ( UpperCamelCase_ ): __lowerCAmelCase : Tuple = 'longest' __lowerCAmelCase : Any = 'max_length' __lowerCAmelCase : Optional[int] = 'do_not_pad' class __snake_case ( UpperCamelCase_ ): __lowerCAmelCase : int = 'pt' __lowerCAmelCase : Any = 'tf' __lowerCAmelCase : int = 'np' __lowerCAmelCase : List[Any] = 'jax' class __snake_case : def __init__( self , _A): SCREAMING_SNAKE_CASE_ = context_managers SCREAMING_SNAKE_CASE_ = ExitStack() def __enter__( self): for context_manager in self.context_managers: self.stack.enter_context(__a) def __exit__( self , *_A , **_A): self.stack.__exit__(*__a , **__a) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = infer_framework(_SCREAMING_SNAKE_CASE ) if framework == "tf": SCREAMING_SNAKE_CASE_ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": SCREAMING_SNAKE_CASE_ = inspect.signature(model_class.forward ) # PyTorch models else: SCREAMING_SNAKE_CASE_ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = model_class.__name__ SCREAMING_SNAKE_CASE_ = infer_framework(_SCREAMING_SNAKE_CASE ) if framework == "tf": SCREAMING_SNAKE_CASE_ = inspect.signature(model_class.call ) # 
TensorFlow models elif framework == "pt": SCREAMING_SNAKE_CASE_ = inspect.signature(model_class.forward ) # PyTorch models else: SCREAMING_SNAKE_CASE_ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : MutableMapping , _SCREAMING_SNAKE_CASE : str = "" , _SCREAMING_SNAKE_CASE : str = "." ): """simple docstring""" def _flatten_dict(_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any]="" , _SCREAMING_SNAKE_CASE : List[str]="." ): for k, v in d.items(): SCREAMING_SNAKE_CASE_ = str(_SCREAMING_SNAKE_CASE ) + delimiter + str(_SCREAMING_SNAKE_CASE ) if parent_key else k if v and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): yield from flatten_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delimiter=_SCREAMING_SNAKE_CASE ).items() else: yield key, v return dict(_flatten_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) @contextmanager def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : bool = False ): """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int]=None ): """simple docstring""" if is_numpy_array(_SCREAMING_SNAKE_CASE ): return np.transpose(_SCREAMING_SNAKE_CASE , axes=_SCREAMING_SNAKE_CASE ) elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return array.T if axes is None else array.permute(*_SCREAMING_SNAKE_CASE ) elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): import tensorflow as tf return tf.transpose(_SCREAMING_SNAKE_CASE , perm=_SCREAMING_SNAKE_CASE ) elif is_jax_tensor(_SCREAMING_SNAKE_CASE ): return jnp.transpose(_SCREAMING_SNAKE_CASE , 
axes=_SCREAMING_SNAKE_CASE ) else: raise ValueError(f"""Type not supported for transpose: {type(_SCREAMING_SNAKE_CASE )}.""" ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" if is_numpy_array(_SCREAMING_SNAKE_CASE ): return np.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return array.reshape(*_SCREAMING_SNAKE_CASE ) elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): import tensorflow as tf return tf.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif is_jax_tensor(_SCREAMING_SNAKE_CASE ): return jnp.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: raise ValueError(f"""Type not supported for reshape: {type(_SCREAMING_SNAKE_CASE )}.""" ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any]=None ): """simple docstring""" if is_numpy_array(_SCREAMING_SNAKE_CASE ): return np.squeeze(_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE ) elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return array.squeeze() if axis is None else array.squeeze(dim=_SCREAMING_SNAKE_CASE ) elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): import tensorflow as tf return tf.squeeze(_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE ) elif is_jax_tensor(_SCREAMING_SNAKE_CASE ): return jnp.squeeze(_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE ) else: raise ValueError(f"""Type not supported for squeeze: {type(_SCREAMING_SNAKE_CASE )}.""" ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" if is_numpy_array(_SCREAMING_SNAKE_CASE ): return np.expand_dims(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return array.unsqueeze(dim=_SCREAMING_SNAKE_CASE ) elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): import tensorflow as tf return tf.expand_dims(_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE ) elif 
is_jax_tensor(_SCREAMING_SNAKE_CASE ): return jnp.expand_dims(_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE ) else: raise ValueError(f"""Type not supported for expand_dims: {type(_SCREAMING_SNAKE_CASE )}.""" ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" if is_numpy_array(_SCREAMING_SNAKE_CASE ): return np.size(_SCREAMING_SNAKE_CASE ) elif is_torch_tensor(_SCREAMING_SNAKE_CASE ): return array.numel() elif is_tf_tensor(_SCREAMING_SNAKE_CASE ): import tensorflow as tf return tf.size(_SCREAMING_SNAKE_CASE ) elif is_jax_tensor(_SCREAMING_SNAKE_CASE ): return array.size else: raise ValueError(f"""Type not supported for expand_dims: {type(_SCREAMING_SNAKE_CASE )}.""" ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for key, value in auto_map.items(): if isinstance(_SCREAMING_SNAKE_CASE , (tuple, list) ): SCREAMING_SNAKE_CASE_ = [f"""{repo_id}--{v}""" if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: SCREAMING_SNAKE_CASE_ = f"""{repo_id}--{value}""" return auto_map def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" for base_class in inspect.getmro(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = base_class.__module__ SCREAMING_SNAKE_CASE_ = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"""Could not infer framework from class {model_class}.""" )
717
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : torch.FloatTensor class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ): super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE_ = Encoder( in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , ) SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A) SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) # pass init params to Decoder SCREAMING_SNAKE_CASE_ = Decoder( in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , ) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = self.encoder(_A) SCREAMING_SNAKE_CASE_ = self.quant_conv(_A) if not return_dict: return (h,) return VQEncoderOutput(latents=_A) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = False , _A = True): # also go through quantization layer if not force_not_quantize: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A) else: SCREAMING_SNAKE_CASE_ = h SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A) 
SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=_A) def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = sample SCREAMING_SNAKE_CASE_ = self.encode(_A).latents SCREAMING_SNAKE_CASE_ = self.decode(_A).sample if not return_dict: return (dec,) return DecoderOutput(sample=_A)
620
0
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { "huggingface/informer-tourism-monthly": ( "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : int = 'informer' __lowerCAmelCase : List[str] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self , _A = None , _A = None , _A = "student_t" , _A = "nll" , _A = 1 , _A = None , _A = "mean" , _A = 0 , _A = 0 , _A = 0 , _A = 0 , _A = None , _A = None , _A = 64 , _A = 32 , _A = 32 , _A = 2 , _A = 2 , _A = 2 , _A = 2 , _A = True , _A = "gelu" , _A = 0.0_5 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 100 , _A = 0.0_2 , _A=True , _A = "prob" , _A = 5 , _A = True , **_A , ): # time series specific configuration SCREAMING_SNAKE_CASE_ = prediction_length SCREAMING_SNAKE_CASE_ = context_length or prediction_length SCREAMING_SNAKE_CASE_ = distribution_output SCREAMING_SNAKE_CASE_ = loss SCREAMING_SNAKE_CASE_ = input_size SCREAMING_SNAKE_CASE_ = num_time_features SCREAMING_SNAKE_CASE_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] SCREAMING_SNAKE_CASE_ = scaling SCREAMING_SNAKE_CASE_ = num_dynamic_real_features SCREAMING_SNAKE_CASE_ = num_static_real_features SCREAMING_SNAKE_CASE_ = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(__A) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`') SCREAMING_SNAKE_CASE_ = cardinality else: SCREAMING_SNAKE_CASE_ = [0] # set embedding_dimension if embedding_dimension and 
num_static_categorical_features > 0: if len(__A) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`') SCREAMING_SNAKE_CASE_ = embedding_dimension else: SCREAMING_SNAKE_CASE_ = [min(50 , (cat + 1) // 2) for cat in self.cardinality] SCREAMING_SNAKE_CASE_ = num_parallel_samples # Transformer architecture configuration SCREAMING_SNAKE_CASE_ = input_size * len(self.lags_sequence) + self._number_of_features SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = use_cache # Informer SCREAMING_SNAKE_CASE_ = attention_type SCREAMING_SNAKE_CASE_ = sampling_factor SCREAMING_SNAKE_CASE_ = distil super().__init__(is_encoder_decoder=__A , **__A) @property def lowerCAmelCase__ ( self): return ( sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
718
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCamelCase__ : Optional[int] = logging.getLogger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' 
) SCREAMING_SNAKE_CASE_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' ) SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' 
) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' 
) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) SCREAMING_SNAKE_CASE_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = special_dtypes SCREAMING_SNAKE_CASE_ = no_split_module_classes SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = max_memory SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. 
Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if modules_to_not_convert is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) SCREAMING_SNAKE_CASE_ = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" with init_empty_weights(): SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) 
else: SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE_ = False if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ): SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE_ = list(model.named_children() ) SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE_ = ['.weight', '.bias'] SCREAMING_SNAKE_CASE_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = param_name SCREAMING_SNAKE_CASE_ = model if "." 
in tensor_name: SCREAMING_SNAKE_CASE_ = tensor_name.split('.' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) SCREAMING_SNAKE_CASE_ = new_module SCREAMING_SNAKE_CASE_ = splits[-1] # offload weights SCREAMING_SNAKE_CASE_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
620
0
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict=() , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : List[str]="no" , _SCREAMING_SNAKE_CASE : List[Any]="29500" ): """simple docstring""" SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ): SCREAMING_SNAKE_CASE_ = True elif "IPython" in sys.modules: SCREAMING_SNAKE_CASE_ = """google.colab""" in str(sys.modules['IPython'].get_ipython() ) try: SCREAMING_SNAKE_CASE_ = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _lowerCamelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( 'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ' 'your training function. Restart your notebook and make sure no cells initializes an ' '`Accelerator`.' ) if num_processes is None: SCREAMING_SNAKE_CASE_ = 8 SCREAMING_SNAKE_CASE_ = PrepareForLaunch(_lowerCamelCase , distributed_type='TPU' ) print(f"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method='fork' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('Launching training on one GPU.' ) else: print('Launching training on one CPU.' 
) function(*_lowerCamelCase ) else: if num_processes is None: raise ValueError( 'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( 'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ' 'inside your training function. Restart your notebook and make sure no cells initializes an ' '`Accelerator`.' ) if torch.cuda.is_initialized(): raise ValueError( 'To launch a multi-GPU training from your notebook, you need to avoid running any instruction ' 'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ' 'function.' ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_lowerCamelCase , master_addr='127.0.01' , master_port=_lowerCamelCase , mixed_precision=_lowerCamelCase ): SCREAMING_SNAKE_CASE_ = PrepareForLaunch(_lowerCamelCase , distributed_type='MULTI_GPU' ) print(f"""Launching training on {num_processes} GPUs.""" ) try: start_processes(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method='fork' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( 'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ' 'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. ' 'Please review your imports and test them when running the `notebook_launcher()` to identify ' 'which one is problematic.' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. 
if is_mps_available(): SCREAMING_SNAKE_CASE_ = """1""" print('Launching training on MPS.' ) elif torch.cuda.is_available(): print('Launching training on one GPU.' ) else: print('Launching training on CPU.' ) function(*_lowerCamelCase ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=() , _SCREAMING_SNAKE_CASE : Optional[int]=2 ): """simple docstring""" from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_lowerCamelCase , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ): SCREAMING_SNAKE_CASE_ = PrepareForLaunch(_lowerCamelCase , debug=_lowerCamelCase ) start_processes(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method='fork' )
719
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } UpperCamelCase__ : str = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ = bs[:] SCREAMING_SNAKE_CASE_ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ = char return pairs class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ = bytes_to_unicode() SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='utf-8') as merges_handle: SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1] SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A)))) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def lowerCAmelCase__ ( self): return len(self.encoder) def lowerCAmelCase__ ( self): return dict(self.encoder , **self.added_tokens_encoder) def lowerCAmelCase__ ( self , _A): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = get_pairs(_A) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf'))) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 while i < len(_A): try: SCREAMING_SNAKE_CASE_ = word.index(_A , _A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) SCREAMING_SNAKE_CASE_ = j if word[i] == first and i < len(_A) - 1 and word[i + 1] == second: 
new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = new_word if len(_A) == 1: break else: SCREAMING_SNAKE_CASE_ = get_pairs(_A) SCREAMING_SNAKE_CASE_ = ' '.join(_A) SCREAMING_SNAKE_CASE_ = word return word def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for token in re.findall(self.pat , _A): SCREAMING_SNAKE_CASE_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' ')) return bpe_tokens def lowerCAmelCase__ ( self , _A): return self.encoder.get(_A , self.encoder.get(self.unk_token)) def lowerCAmelCase__ ( self , _A): return self.decoder.get(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n') SCREAMING_SNAKE_CASE_ = 0 with open(_A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!') SCREAMING_SNAKE_CASE_ = token_index writer.write(' '.join(_A) + 
'\n') index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCAmelCase__ ( self , _A , _A=False , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ = ' ' + text return (text, kwargs)
620
0
class __snake_case:  # Public class to implement a graph
    """Count the connected "islands" of 1-cells in a binary grid.

    Two land cells (value 1) belong to the same island when they are
    adjacent horizontally, vertically or diagonally (8-connectivity).

    NOTE(review): the obfuscated original named every method
    ``lowerCAmelCase__`` (so only the last survived) while the bodies
    called ``self.is_safe`` / ``self.diffs``; the method names below are
    restored from those internal call sites.
    """

    def __init__(self, row, col, graph):
        self.ROW = row      # number of rows in the grid
        self.COL = col      # number of columns in the grid
        self.graph = graph  # 2-D list of 0/1 cell values

    def is_safe(self, i, j, visited):
        """Return True when (i, j) is inside the grid, unvisited, and land."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        """Depth-first flood fill marking every cell of the island containing (i, j)."""
        # Offsets of the 8 neighbouring cells, in (row, col) coordinate order.
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # mark this cell before recursing
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):
        """Return the number of islands in the grid."""
        visited = [[False for _ in range(self.COL)] for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    # Unvisited land cell: flood-fill its island, count it once.
                    self.diffs(i, j, visited)
                    count += 1
        return count
720
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the obfuscated original bound BOTH of these module globals to
# the same name with bogus (unimported) typing annotations that raised
# NameError at import time.  Names kept, annotations dropped; the second
# binding clobbers the logger exactly as the original did.
UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class __snake_case(PretrainedConfig):
    """Configuration class for DPR (Dense Passage Retrieval) models.

    Parameter names and defaults restored from the upstream
    ``transformers`` DPRConfig; the obfuscated original declared every
    ``__init__`` parameter as ``_A`` (a SyntaxError) and inherited from an
    undefined name.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection on top of the encoder.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
620
0
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): SCREAMING_SNAKE_CASE_ = None if token is not None: SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} SCREAMING_SNAKE_CASE_ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" SCREAMING_SNAKE_CASE_ = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json() SCREAMING_SNAKE_CASE_ = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) SCREAMING_SNAKE_CASE_ = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = requests.get(url + f"""&page={i + 2}""" , headers=lowerCAmelCase__ ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int=None ): SCREAMING_SNAKE_CASE_ = None if token is not None: SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} SCREAMING_SNAKE_CASE_ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" SCREAMING_SNAKE_CASE_ = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json() SCREAMING_SNAKE_CASE_ = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) SCREAMING_SNAKE_CASE_ = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = requests.get(url + f"""&page={i + 2}""" , headers=lowerCAmelCase__ ).json() artifacts.update({artifact['name']: 
artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): SCREAMING_SNAKE_CASE_ = None if token is not None: SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} SCREAMING_SNAKE_CASE_ = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = result.headers['Location'] SCREAMING_SNAKE_CASE_ = requests.get(lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase__ , f"""{artifact_name}.zip""" ) with open(lowerCAmelCase__ , 'wb' ) as fp: fp.write(response.content ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=None ): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = None with zipfile.ZipFile(lowerCAmelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCAmelCase__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(lowerCAmelCase__ ) as f: for line in f: SCREAMING_SNAKE_CASE_ = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs SCREAMING_SNAKE_CASE_ = line[: line.index(': ' )] SCREAMING_SNAKE_CASE_ = line[line.index(': ' ) + len(': ' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed SCREAMING_SNAKE_CASE_ = line[len('FAILED ' ) :] failed_tests.append(lowerCAmelCase__ ) elif filename == "job_name.txt": SCREAMING_SNAKE_CASE_ = line if 
len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( f"""`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCAmelCase__ )} for `errors` """ f"""and {len(lowerCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" ' problem.' ) SCREAMING_SNAKE_CASE_ = None if job_name and job_links: SCREAMING_SNAKE_CASE_ = job_links.get(lowerCAmelCase__ , lowerCAmelCase__ ) # A list with elements of the form (line of error, error, failed test) SCREAMING_SNAKE_CASE_ = [x + [y] + [job_link] for x, y in zip(lowerCAmelCase__ , lowerCAmelCase__ )] return result def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any=None ): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for p in os.listdir(lowerCAmelCase__ ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(lowerCAmelCase__ , job_links=lowerCAmelCase__ ) ) return errors def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): SCREAMING_SNAKE_CASE_ = Counter() counter.update([x[1] for x in logs] ) SCREAMING_SNAKE_CASE_ = counter.most_common() SCREAMING_SNAKE_CASE_ = {} for error, count in counts: if error_filter is None or error not in error_filter: SCREAMING_SNAKE_CASE_ = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} SCREAMING_SNAKE_CASE_ = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=lowerCAmelCase__ ) ) return r def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): SCREAMING_SNAKE_CASE_ = test.split('::' )[0] if test.startswith('tests/models/' ): SCREAMING_SNAKE_CASE_ = test.split('/' )[2] else: SCREAMING_SNAKE_CASE_ = None return test def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): SCREAMING_SNAKE_CASE_ = [(x[0], x[1], get_model(x[2] )) for x in 
logs] SCREAMING_SNAKE_CASE_ = [x for x in logs if x[2] is not None] SCREAMING_SNAKE_CASE_ = {x[2] for x in logs} SCREAMING_SNAKE_CASE_ = {} for test in tests: SCREAMING_SNAKE_CASE_ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) SCREAMING_SNAKE_CASE_ = counter.most_common() SCREAMING_SNAKE_CASE_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} SCREAMING_SNAKE_CASE_ = sum(error_counts.values() ) if n_errors > 0: SCREAMING_SNAKE_CASE_ = {'count': n_errors, 'errors': error_counts} SCREAMING_SNAKE_CASE_ = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=lowerCAmelCase__ ) ) return r def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] ): SCREAMING_SNAKE_CASE_ = '| no. | error | status |' SCREAMING_SNAKE_CASE_ = '|-:|:-|:-|' SCREAMING_SNAKE_CASE_ = [header, sep] for error in reduced_by_error: SCREAMING_SNAKE_CASE_ = reduced_by_error[error]['count'] SCREAMING_SNAKE_CASE_ = f"""| {count} | {error[:100]} | |""" lines.append(lowerCAmelCase__ ) return "\n".join(lowerCAmelCase__ ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): SCREAMING_SNAKE_CASE_ = '| model | no. 
of errors | major error | count |' SCREAMING_SNAKE_CASE_ = '|-:|-:|-:|-:|' SCREAMING_SNAKE_CASE_ = [header, sep] for model in reduced_by_model: SCREAMING_SNAKE_CASE_ = reduced_by_model[model]['count'] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = list(reduced_by_model[model]['errors'].items() )[0] SCREAMING_SNAKE_CASE_ = f"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(lowerCAmelCase__ ) return "\n".join(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") UpperCamelCase__ : Tuple = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) UpperCamelCase__ : Tuple = get_job_links(args.workflow_run_id, token=args.token) UpperCamelCase__ : Optional[int] = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: UpperCamelCase__ : Optional[int] = k.find(" / ") UpperCamelCase__ : List[str] = k[index + len(" / ") :] UpperCamelCase__ : str = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) UpperCamelCase__ : int = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) UpperCamelCase__ : Dict = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error UpperCamelCase__ : Optional[int] = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors UpperCamelCase__ : str = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) UpperCamelCase__ : List[Any] = reduce_by_error(errors) UpperCamelCase__ : Optional[Any] = reduce_by_model(errors) UpperCamelCase__ : Tuple = make_github_table(reduced_by_error) UpperCamelCase__ : Optional[Any] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
721
import pytest

import datasets


# Fixture modules loaded as plugins.
# NOTE(review): pytest only honours this list under the exact name
# ``pytest_plugins`` -- the obfuscated original used a mangled name (with an
# unimported ``Union`` annotation) that pytest never read.
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """Mark every collected test that is neither 'integration' nor 'unit' as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """Register custom markers so pytest does not warn about unknown marks."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect all ``datasets`` cache directories into a per-session temp dir."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence progress bars for the entire test session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Never ping the Hub download counter from tests."""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # NOTE(review): the value passed here was lost in the obfuscation;
    # restored as ``True`` per upstream -- confirm against the test suite.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


# NOTE(review): both module globals shared one mangled name in the original,
# with unimported ``List`` annotations that raised NameError at import time.
# Names kept, annotations dropped; the second binding clobbers the logger
# exactly as the original did.
UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class __snake_case(PretrainedConfig):
    """Configuration class for TrajectoryTransformer models.

    Restored from upstream ``transformers``: the obfuscated original gave all
    three class attributes the same name (so two were silently shadowed),
    declared every ``__init__`` parameter as ``_A`` (a SyntaxError), and
    forwarded module globals to ``super().__init__``.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Relative loss weights for the action / reward / value heads.
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
700
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return how many shards the list-valued entries of ``gen_kwargs`` describe.

    Every list in ``gen_kwargs`` is treated as a sharded data source and all
    lists must have the same length.  Non-list values are ignored.

    Raises:
        RuntimeError: if two list values have different lengths (ambiguous sharding).
    """
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    # At least one shard even when there are no lists at all.
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous ranges.

    The first ``num_shards % max_num_jobs`` groups receive one extra shard, so
    group sizes differ by at most one.
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        # Continue where the previous group stopped.
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split ``gen_kwargs`` into up to ``max_num_jobs`` dicts, sharding the list values."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of :func:`_split_gen_kwargs`: concatenate list values, keep scalars from the first dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of ``gen_kwargs`` with every list value shuffled by ``rng``.

    Lists of equal length share one permutation so parallel data sources stay aligned.
    """
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Shallow copy, then replace each list by its shuffled counterpart.
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
620
0
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``state_dict`` file.

    (Function name restored from the ``__main__`` call site below; the
    obfuscated original defined it under a mangled name with three identical
    parameter names, which is a SyntaxError.)

    Args:
        tf_checkpoint_path: path of the TensorFlow checkpoint to read.
        bert_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: destination file for the converted ``state_dict``.
    """
    # Initialise a PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Copy the TF weights into the freshly-built PyTorch model in place.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    """Configuration class for BioGPT models.

    Stores the hyper-parameters that define a BioGPT architecture; defaults
    match the ``microsoft/biogpt`` checkpoint.
    """

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
620
0
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if *n* uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    A 9-digit pandigital concatenated product of x and (1, ..., k) with k >= 2
    can only arise from a 4-digit x starting with 9 (x concatenated with 2x,
    i.e. x * 100002) or a 3-digit x (x concatenated with 2x and 3x, i.e.
    x * 1002003).  Both ranges are scanned downwards so the first hit is the
    maximum.
    """
    # 4-digit base: candidate = x * 10^5 + 2x = 100002 * x
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    # 3-digit base: candidate = x * 10^6 + 2x * 10^3 + 3x = 1002003 * x
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
702
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Class-conditional image generation with a Diffusion Transformer (DiT).

    Denoises latents with a ``Transformer2DModel`` under classifier-free
    guidance and decodes them to images with a VAE.
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                # one id may map to several comma-separated synonyms
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable label name(s) to class id(s) for ``__call__``.

        Raises:
            ValueError: if any requested label is unknown.
        """
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate one image per entry in ``class_labels``.

        ``guidance_scale > 1`` enables classifier-free guidance: the batch is
        doubled with null-class (id 1000) conditioning and the two halves of
        the predicted noise are recombined.
        """
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # both halves carry the same latents; conditioning differs
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
620
0
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    """Configuration for the Autoformer time-series forecasting model.

    Holds both the time-series-specific settings (prediction/context lengths,
    lags, static/dynamic feature counts) and the Transformer architecture
    hyper-parameters, plus the Autoformer-specific decomposition settings.
    """

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # context defaults to the prediction horizon when not given
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # heuristic default: half the cardinality, capped at 50
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # width of the per-timestep feature vector fed to the model
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
703
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    """A small from-scratch convolutional network: one conv layer, one pooling
    layer and a two-layer fully-connected classifier, trained with plain
    gradient descent."""

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling window size
        :param bp_num1: unit count of the flattened layer
        :param bp_num2: unit count of the hidden layer
        :param bp_num3: unit count of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # weights initialised uniformly in (-0.5, 0.5), thresholds in (-1, 1)
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def ReadModel(cls, model_path):
        # read saved model back into a fresh CNN instance
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        # logistic sigmoid, applied element-wise
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            # NOTE(review): original called self.Expand_Mat, which does not
            # exist; _expand_mat is the matrix expander defined below.
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # up-sample the pooled gradient back to feature-map resolution and
        # apply the sigmoid derivative
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply((data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
620
0
# flake8: noqa
# Lint as: python3

# Public API of this utils package; keep in sync with the imports below.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
704
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the most recent scheduled (daily) CI workflow runs on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the last completed daily CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into *output_dir*."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the keyword expected by get_artifacts_links.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts and return their files' text content.

    Returns:
        dict mapping artifact name -> {filename inside the zip -> UTF-8 content}.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
620
0
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its payload, parent pointer and rank.
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) with path compression and union by rank."""

    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set representative (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach lower-rank root to higher
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal minimum-spanning-tree support."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return the minimum spanning tree as a new graph (Kruskal's algorithm)."""
        # collect each undirected edge once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # sort by weight; original had `lambda _A: x[2]`, which referenced an
        # undefined name and raised NameError
        edges.sort(key=lambda edge: edge[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: take lightest edges that connect distinct components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
705
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it provides; extended below based on
# which optional backends are installed.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# end-of-word marker used in the merges file
BPE_TOKEN_MERGES = "</w>"
# continuation marker emitted between sub-word pieces
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Speech2Text2.

    When instantiated without a merges file it can only decode; encoding
    requires the BPE ranks loaded from ``merges.txt``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word pieces (requires merges file)."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-word pieces back into a string, fusing `@@ ` continuations."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
706
# Tests for the `accelerate launch` / `accelerate test` / `accelerate tpu-config`
# command-line entry points.
# NOTE(review): machine-mangled source — results are bound to
# `SCREAMING_SNAKE_CASE_` / `__lowerCAmelCase` and read back through other
# names (`mod_file`, `config_folder`, `cmd`, `_A`, ...) that are undefined in
# scope; the annotation names (`Dict`, `Tuple`, ...) are also never imported.
# Both classes share the name `__snake_case` (the second shadows the first) and
# every method shares the name `lowerCAmelCase__` (only the last per class
# survives).  Code reproduced unchanged; comments only.
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class __snake_case ( unittest.TestCase ):
    # Path to the bundled launch smoke-test script and the location of the
    # user's default accelerate config (stashed away during the tests).
    __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
    __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCAmelCase : Tuple = ['accelerate', 'launch']
    __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCAmelCase : List[str] = 'default_config.yaml'
    __lowerCAmelCase : List[Any] = config_folder / config_file
    __lowerCAmelCase : str = config_folder / '_default_config.yaml'
    __lowerCAmelCase : Optional[int] = Path('tests/test_configs' )

    @classmethod
    def lowerCAmelCase__ ( cls):
        # setUpClass: move the user's real config aside so the tests start clean.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def lowerCAmelCase__ ( cls):
        # tearDownClass: restore the user's config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def lowerCAmelCase__ ( self):
        # `accelerate launch` without a config file; adds --multi_gpu when more
        # than one CUDA device is visible.
        SCREAMING_SNAKE_CASE_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # `accelerate launch --config_file <cfg>` for every sample config.
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=_A):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # Smoke test for `accelerate test`.
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())


class __snake_case ( unittest.TestCase ):
    # `accelerate tpu-config --debug` prints the gcloud command instead of
    # running it; each test asserts on the printed command line.
    __lowerCAmelCase : Optional[Any] = 'test-tpu'
    __lowerCAmelCase : str = 'us-central1-a'
    __lowerCAmelCase : Union[str, Any] = 'ls'
    __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
    __lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
    __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
    __lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'

    def lowerCAmelCase__ ( self):
        # All flags on the command line, no config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Legacy (0.12.0) config plus explicit zone/name flags.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Latest config only: the commands stored in the config file are used.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command on the CLI overrides the commands from the config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command may be repeated; commands are chained with ';'.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command_file: commands are read from a shell script.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command_file with the legacy config plus explicit zone/name flags.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --install_accelerate prepends a `pip install accelerate -U` step.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --accelerate_version pins the installed accelerate version.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
620
0
# Tests for the DeformableDETR image processor.
# NOTE(review): machine-mangled source — results are bound to
# `SCREAMING_SNAKE_CASE_` and read back through other names (`parent`, `image`,
# `expected_values`, `a_`, ...) that are undefined in scope, and several defs
# repeat the parameter name `_A`, which is a SyntaxError as written.  Both
# classes are named `__snake_case` while `DeformableDetrImageProcessingTester`
# is referenced by its original name.  Code reproduced unchanged; comments only.
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class __snake_case ( unittest.TestCase ):
    # Helper that stores the image-processor kwargs and computes the expected
    # post-resize dimensions used by the assertions below.

    def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        SCREAMING_SNAKE_CASE_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = min_resolution
        SCREAMING_SNAKE_CASE_ = max_resolution
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean
        SCREAMING_SNAKE_CASE_ = image_std
        SCREAMING_SNAKE_CASE_ = do_rescale
        SCREAMING_SNAKE_CASE_ = rescale_factor
        SCREAMING_SNAKE_CASE_ = do_pad

    def lowerCAmelCase__ ( self):
        # kwargs dict handed to the image processor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def lowerCAmelCase__ ( self , _A , _A=False):
        # Expected (height, width) after the shortest-edge resize rule; for a
        # batch, the per-axis maximum (i.e. the padded size).
        if not batched:
            SCREAMING_SNAKE_CASE_ = image_inputs[0]
            if isinstance(a_ , Image.Image):
                SCREAMING_SNAKE_CASE_ = image.size
            else:
                SCREAMING_SNAKE_CASE_ = image.shape[1], image.shape[2]
            if w < h:
                SCREAMING_SNAKE_CASE_ = int(self.size['shortest_edge'] * h / w)
                SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
            elif w > h:
                SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
                SCREAMING_SNAKE_CASE_ = int(self.size['shortest_edge'] * w / h)
            else:
                SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
                SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
        else:
            SCREAMING_SNAKE_CASE_ = []
            for image in image_inputs:
                SCREAMING_SNAKE_CASE_ = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            SCREAMING_SNAKE_CASE_ = max(a_ , key=lambda _A: item[0])[0]
            SCREAMING_SNAKE_CASE_ = max(a_ , key=lambda _A: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
    # NOTE(review): the mixin base `__lowerCamelCase` is undefined here —
    # presumably the ImageProcessingSavingTestMixin imported above.
    __lowerCAmelCase : Dict = DeformableDetrImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self):
        # setUp: build the helper that supplies the processor kwargs.
        SCREAMING_SNAKE_CASE_ = DeformableDetrImageProcessingTester(self)

    @property
    def lowerCAmelCase__ ( self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self):
        # The processor must expose all configuration attributes.
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(a_ , 'image_mean'))
        self.assertTrue(hasattr(a_ , 'image_std'))
        self.assertTrue(hasattr(a_ , 'do_normalize'))
        self.assertTrue(hasattr(a_ , 'do_resize'))
        self.assertTrue(hasattr(a_ , 'do_rescale'))
        self.assertTrue(hasattr(a_ , 'do_pad'))
        self.assertTrue(hasattr(a_ , 'size'))

    def lowerCAmelCase__ ( self):
        # from_dict honours defaults and the legacy kwargs max_size /
        # pad_and_return_pixel_mask.
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad , a_)
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a_)
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad , a_)

    def lowerCAmelCase__ ( self):
        pass

    def lowerCAmelCase__ ( self):
        # __call__ with PIL images, un-batched and batched.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_)
        for image in image_inputs:
            self.assertIsInstance(a_ , Image.Image)
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(a_)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(a_ , batched=a_)
        SCREAMING_SNAKE_CASE_ = image_processing(a_ , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase__ ( self):
        # __call__ with numpy arrays, un-batched and batched.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_)
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray)
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(a_)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(a_ , return_tensors='pt').pixel_values
        SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(a_ , batched=a_)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase__ ( self):
        # __call__ with torch tensors, un-batched and batched.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_)
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor)
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(a_)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(a_ , return_tensors='pt').pixel_values
        SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(a_ , batched=a_)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def lowerCAmelCase__ ( self):
        # End-to-end COCO detection annotation encoding against golden values.
        # prepare image and target
        SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
            SCREAMING_SNAKE_CASE_ = json.loads(f.read())
        SCREAMING_SNAKE_CASE_ = {"image_id": 39769, "annotations": target}
        # encode them
        SCREAMING_SNAKE_CASE_ = DeformableDetrImageProcessor()
        SCREAMING_SNAKE_CASE_ = image_processing(images=a_ , annotations=a_ , return_tensors='pt')
        # verify pixel values
        SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape , a_)
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a_ , atol=1E-4))
        # verify area
        SCREAMING_SNAKE_CASE_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a_))
        # verify boxes
        SCREAMING_SNAKE_CASE_ = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape , a_)
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a_ , atol=1E-3))
        # verify image_id
        SCREAMING_SNAKE_CASE_ = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a_))
        # verify is_crowd
        SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a_))
        # verify class_labels
        SCREAMING_SNAKE_CASE_ = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a_))
        # verify orig_size
        SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a_))
        # verify size
        SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a_))

    @slow
    def lowerCAmelCase__ ( self):
        # End-to-end COCO panoptic annotation encoding against golden values.
        # prepare image, target and masks_path
        SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
            SCREAMING_SNAKE_CASE_ = json.loads(f.read())
        SCREAMING_SNAKE_CASE_ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        SCREAMING_SNAKE_CASE_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        SCREAMING_SNAKE_CASE_ = DeformableDetrImageProcessor(format='coco_panoptic')
        SCREAMING_SNAKE_CASE_ = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors='pt')
        # verify pixel values
        SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape , a_)
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a_ , atol=1E-4))
        # verify area
        SCREAMING_SNAKE_CASE_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a_))
        # verify boxes
        SCREAMING_SNAKE_CASE_ = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape , a_)
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a_ , atol=1E-3))
        # verify image_id
        SCREAMING_SNAKE_CASE_ = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a_))
        # verify is_crowd
        SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a_))
        # verify class_labels
        SCREAMING_SNAKE_CASE_ = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a_))
        # verify masks
        SCREAMING_SNAKE_CASE_ = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a_)
        # verify orig_size
        SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a_))
        # verify size
        SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a_))
707
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
# Tests for the MobileBERT tokenizers (slow and fast), including the shared
# BERT BasicTokenizer / WordpieceTokenizer helpers.
# NOTE(review): machine-mangled source — the mixin base `lowerCAmelCase__`
# (presumably TokenizerTesterMixin) is undefined, class attributes are all
# bound to `__lowerCAmelCase`, results go to `SCREAMING_SNAKE_CASE_` and are
# read back via names never defined in scope, and all methods share one name
# so only the last definition survives.  Code reproduced unchanged; comments only.
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
    __lowerCAmelCase : Optional[int] = MobileBertTokenizer
    __lowerCAmelCase : int = MobileBertTokenizerFast
    __lowerCAmelCase : List[str] = True
    __lowerCAmelCase : List[str] = True
    __lowerCAmelCase : Union[str, Any] = filter_non_english
    __lowerCAmelCase : Optional[int] = 'google/mobilebert-uncased'

    def lowerCAmelCase__ ( self):
        # setUp: write a tiny wordpiece vocab used by the full-tokenizer tests.
        super().setUp()
        SCREAMING_SNAKE_CASE_ = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        SCREAMING_SNAKE_CASE_ = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def lowerCAmelCase__ ( self , _A):
        # Input/expected-output pair for round-trip tests.
        SCREAMING_SNAKE_CASE_ = 'UNwant\u00E9d,running'
        SCREAMING_SNAKE_CASE_ = 'unwanted, running'
        return input_text, output_text

    def lowerCAmelCase__ ( self):
        # Slow tokenizer: tokens and ids for a mixed-case accented input.
        SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file)
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , [9, 6, 7, 12, 10, 11])

    def lowerCAmelCase__ ( self):
        # Slow and fast tokenizers must agree, with and without lower-casing.
        if not self.test_rust_tokenizer:
            return
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = 'UNwant\u00E9d,running'
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A , add_special_tokens=_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A)
        self.assertListEqual(_A , _A)
        # With lower casing
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer(do_lower_case=_A)
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer(do_lower_case=_A)
        SCREAMING_SNAKE_CASE_ = 'UNwant\u00E9d,running'
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A , add_special_tokens=_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A)
        self.assertListEqual(_A , _A)

    def lowerCAmelCase__ ( self):
        # CJK characters are split into individual tokens.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])

    def lowerCAmelCase__ ( self):
        # Lower-casing also strips accents by default.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ') , ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])

    def lowerCAmelCase__ ( self):
        # Lower-casing with accent stripping disabled keeps the accent.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])

    def lowerCAmelCase__ ( self):
        # Lower-casing with explicit accent stripping.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])

    def lowerCAmelCase__ ( self):
        # Default lower-casing behaviour.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])

    def lowerCAmelCase__ ( self):
        # No lower-casing: case preserved.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def lowerCAmelCase__ ( self):
        # No lower-casing, accent stripping disabled.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def lowerCAmelCase__ ( self):
        # No lower-casing, explicit accent stripping.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def lowerCAmelCase__ ( self):
        # never_split tokens survive intact.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def lowerCAmelCase__ ( self):
        # WordpieceTokenizer greedy longest-match-first behaviour.
        SCREAMING_SNAKE_CASE_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        SCREAMING_SNAKE_CASE_ = {}
        for i, token in enumerate(_A):
            SCREAMING_SNAKE_CASE_ = i
        SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=_A , unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize('') , [])
        self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])

    def lowerCAmelCase__ ( self):
        # Character-class helper: whitespace detection.
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))
        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def lowerCAmelCase__ ( self):
        # Character-class helper: control-character detection.
        self.assertTrue(_is_control('\u0005'))
        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def lowerCAmelCase__ ( self):
        # Character-class helper: punctuation detection.
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))
        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    def lowerCAmelCase__ ( self):
        # Soft-hyphen-only input must produce no tokens.
        # NOTE(review): `tokenizer.tokenize(_A)` ignores the comprehension
        # variable `t` — upstream this is `tokenize(t)`; as written every
        # iteration tokenizes the same (undefined) `_A`.
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(_A) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(_A) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])

    @slow
    def lowerCAmelCase__ ( self):
        # build_inputs_with_special_tokens: [CLS] ... [SEP] (... [SEP]).
        SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('google/mobilebert-uncased')
        SCREAMING_SNAKE_CASE_ = tokenizer.encode('sequence builders' , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode('multi-sequence build' , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(_A , _A)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def lowerCAmelCase__ ( self):
        # Offset mapping of the fast tokenizer, with and without lower-casing.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(
                    _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
                SCREAMING_SNAKE_CASE_ = tokenizer_r.do_lower_case if hasattr(_A , 'do_lower_case') else False
                SCREAMING_SNAKE_CASE_ = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])

    def lowerCAmelCase__ ( self):
        # Chinese characters must not receive '##' continuation prefixes when
        # tokenize_chinese_chars is True; only the first keeps none when False.
        SCREAMING_SNAKE_CASE_ = ['的', '人', '有']
        SCREAMING_SNAKE_CASE_ = ''.join(_A)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                SCREAMING_SNAKE_CASE_ = True
                SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(_A)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_A , _A)
                self.assertListEqual(_A , _A)
                SCREAMING_SNAKE_CASE_ = False
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(_A)
                # it is expected that only the first Chinese character is not preceded by "##".
                SCREAMING_SNAKE_CASE_ = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A)
                ]
                self.assertListEqual(_A , _A)
                self.assertListEqual(_A , _A)
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, n=10):
    """Worker for one element of the parallel odd-even transposition sort.

    Each worker owns one value and, for ``n`` rounds, exchanges it with its
    neighbours through pipes (even rounds pair to the right, odd rounds to the
    left, relative to ``position``).  After the rounds the value has settled
    into sorted position and is reported through ``result_pipe``.

    Args:
        position: index of this element in the original list.
        value: the element this process is responsible for.
        l_send / r_send: pipes used to send our value to the left/right
            neighbour (``None`` at the ends of the list).
        lr_cv / rr_cv: pipes used to receive the left/right neighbour's value.
        result_pipe: pipe used to send the final value back to the parent.
        n: number of exchange rounds; must be >= the list length to guarantee
            a fully sorted result (defaults to 10 for backward compatibility
            with the original 10-element demo).
    """
    global process_lock

    # We perform n swaps since after n swaps we know we are sorted.  We
    # *could* stop early if already sorted, but detecting that costs as much
    # as simply finishing the rounds.
    for i in range(n):
        if (i + position) % 2 == 0 and r_send is not None:
            # Even phase: exchange with the right neighbour and keep the
            # smaller value, since we are on the left.
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            process_lock.acquire()
            right_value = rr_cv[0].recv()
            process_lock.release()

            value = min(value, right_value)
        elif (i + position) % 2 != 0 and l_send is not None:
            # Odd phase: exchange with the left neighbour and keep the larger
            # value, since we are on the right.
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            process_lock.acquire()
            left_value = lr_cv[0].recv()
            process_lock.release()

            value = max(value, left_value)

    # After all rounds, report the settled value back to the parent.
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` in ascending order with a parallel odd-even transposition.

    One process is spawned per element; neighbouring processes exchange values
    through pipes for ``len(arr)`` rounds.  The list is sorted in place and
    also returned.
    """
    # Trivial inputs need no worker processes (and the worker-wiring below
    # assumes at least two elements).
    if len(arr) <= 1:
        return arr

    n = len(arr)
    process_array_ = []
    # one result pipe per element, used to retrieve the settled values
    result_pipe = [Pipe() for _ in arr]

    # First process: only has a right neighbour.
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], n),
        )
    )
    # The next process receives on the pipe we send on, and sends on the pipe
    # we receive on.
    temp_lr = temp_rs
    temp_ls = temp_rr

    # Middle processes: connected on both sides.
    for i in range(1, n - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], n),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    # Last process: only has a left neighbour.
    process_array_.append(
        Process(
            target=oe_process,
            args=(n - 1, arr[n - 1], temp_ls, None, temp_lr, None, result_pipe[n - 1], n),
        )
    )

    for p in process_array_:
        p.start()

    # Collect the settled values in position order, then reap the workers.
    for p in range(n):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort the reversed list 10..1 and print it before and after."""
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
620
0
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def _UpperCAmelCase ( ): """simple docstring""" raise RuntimeError('CUDA out of memory.' ) class __snake_case ( nn.Module ): def __init__( self): super().__init__() SCREAMING_SNAKE_CASE_ = nn.Linear(3 , 4) SCREAMING_SNAKE_CASE_ = nn.BatchNormad(4) SCREAMING_SNAKE_CASE_ = nn.Linear(4 , 5) def lowerCAmelCase__ ( self , _A): return self.lineara(self.batchnorm(self.lineara(_SCREAMING_SNAKE_CASE))) class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = [] @find_executable_batch_size(starting_batch_size=128) def mock_training_loop_function(_A): nonlocal batch_sizes batch_sizes.append(_SCREAMING_SNAKE_CASE) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = [] @find_executable_batch_size(starting_batch_size=128) def mock_training_loop_function(_A , _A): nonlocal batch_sizes batch_sizes.append(_SCREAMING_SNAKE_CASE) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mock_training_loop_function('hello') self.assertListEqual(_SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8]) self.assertListEqual([bs, arga] , [8, 'hello']) def lowerCAmelCase__ ( self): @find_executable_batch_size(starting_batch_size=0) def mock_training_loop_function(_A): pass with self.assertRaises(_SCREAMING_SNAKE_CASE) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' 
, cm.exception.args[0]) def lowerCAmelCase__ ( self): @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(_A): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_SCREAMING_SNAKE_CASE) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0]) def lowerCAmelCase__ ( self): @find_executable_batch_size(starting_batch_size=128) def mock_training_loop_function(_A , _A , _A): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_SCREAMING_SNAKE_CASE) as cm: mock_training_loop_function(128 , 'hello' , 'world') self.assertIn('Batch size was passed into `f`' , cm.exception.args[0]) self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0]) def lowerCAmelCase__ ( self): @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(_A): raise ValueError('Oops, we had an error!') with self.assertRaises(_SCREAMING_SNAKE_CASE) as cm: mock_training_loop_function() self.assertIn('Oops, we had an error!' , cm.exception.args[0]) @require_cuda def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = torch.cuda.memory_allocated() SCREAMING_SNAKE_CASE_ = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ = release_memory(_SCREAMING_SNAKE_CASE) self.assertEqual(torch.cuda.memory_allocated() , _SCREAMING_SNAKE_CASE)
709
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering') self.tool.setup() SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering' , remote=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop')
620
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor UpperCamelCase__ : Tuple = logging.get_logger(__name__) class __snake_case ( _A ): def __init__( self , *_A , **_A): warnings.warn( 'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DonutImageProcessor instead.' , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__)
710
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
0
UpperCamelCase__ : str = '''Alexander Joslin''' import operator as op from .stack import Stack def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} SCREAMING_SNAKE_CASE_ = Stack() SCREAMING_SNAKE_CASE_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(_SCREAMING_SNAKE_CASE ) ) elif i in operators: # RULE 2 operator_stack.push(_SCREAMING_SNAKE_CASE ) elif i == ")": # RULE 4 SCREAMING_SNAKE_CASE_ = operator_stack.peek() operator_stack.pop() SCREAMING_SNAKE_CASE_ = operand_stack.peek() operand_stack.pop() SCREAMING_SNAKE_CASE_ = operand_stack.peek() operand_stack.pop() SCREAMING_SNAKE_CASE_ = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) operand_stack.push(_SCREAMING_SNAKE_CASE ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": UpperCamelCase__ : int = '''(5 + ((4 * 2) * (2 + 3)))''' # answer = 45 print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
711
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
620
0
from __future__ import annotations import math def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCamelCase__ : Optional[Any] = [num for num in range(3, 100_001, 2) if not is_prime(num)] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" if not isinstance(__snake_case , __snake_case ): raise ValueError('n must be an integer' ) if n <= 0: raise ValueError('n must be >= 0' ) SCREAMING_SNAKE_CASE_ = [] for num in range(len(__snake_case ) ): SCREAMING_SNAKE_CASE_ = 0 while 2 * i * i <= odd_composites[num]: SCREAMING_SNAKE_CASE_ = odd_composites[num] - 2 * i * i if is_prime(__snake_case ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__snake_case ) == n: return list_nums return [] def _UpperCAmelCase ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
712
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index == number_of_items: return 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 ) if weights[index] <= max_weight: SCREAMING_SNAKE_CASE_ = values[index] + knapsack( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
620
0
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __snake_case ( lowercase_ ): @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ SCREAMING_SNAKE_CASE_ = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ SCREAMING_SNAKE_CASE_ = """ import socket def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache SCREAMING_SNAKE_CASE_ = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(lowerCamelCase_) BertModel.from_pretrained(lowerCamelCase_) BertTokenizer.from_pretrained(lowerCamelCase_) pipeline(task='fill-mask' , model=lowerCamelCase_) # baseline - just load from_pretrained with normal network SCREAMING_SNAKE_CASE_ = [sys.executable, """-c""", """\n""".join([load, run, mock])] # should succeed SCREAMING_SNAKE_CASE_ = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files SCREAMING_SNAKE_CASE_ = """1""" SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 0 , result.stderr) self.assertIn('success' , result.stdout.decode()) @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ SCREAMING_SNAKE_CASE_ = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = 
pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ SCREAMING_SNAKE_CASE_ = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache SCREAMING_SNAKE_CASE_ = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(lowerCamelCase_) BertModel.from_pretrained(lowerCamelCase_) BertTokenizer.from_pretrained(lowerCamelCase_) pipeline(task='fill-mask' , model=lowerCamelCase_) # baseline - just load from_pretrained with normal network SCREAMING_SNAKE_CASE_ = [sys.executable, """-c""", """\n""".join([load, run, mock])] # should succeed SCREAMING_SNAKE_CASE_ = self.get_env() SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 0 , result.stderr) self.assertIn('success' , result.stdout.decode()) @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """ from transformers import BertConfig, BertModel, BertTokenizer """ SCREAMING_SNAKE_CASE_ = """ mname = \"hf-internal-testing/tiny-random-bert-sharded\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print(\"success\") """ SCREAMING_SNAKE_CASE_ = """ import socket def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\") socket.socket = offline_socket """ # baseline - just load from_pretrained with normal network SCREAMING_SNAKE_CASE_ = [sys.executable, """-c""", """\n""".join([load, run])] # should succeed SCREAMING_SNAKE_CASE_ = self.get_env() SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 0 , result.stderr) self.assertIn('success' , result.stdout.decode()) # next emulate no network SCREAMING_SNAKE_CASE_ = [sys.executable, """-c""", """\n""".join([load, mock, run])] # 
Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files SCREAMING_SNAKE_CASE_ = """1""" SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 0 , result.stderr) self.assertIn('success' , result.stdout.decode()) @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """ from transformers import pipeline """ SCREAMING_SNAKE_CASE_ = """ mname = \"hf-internal-testing/tiny-random-bert\" pipe = pipeline(model=mname) """ SCREAMING_SNAKE_CASE_ = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\") socket.socket = offline_socket """ SCREAMING_SNAKE_CASE_ = self.get_env() SCREAMING_SNAKE_CASE_ = """1""" SCREAMING_SNAKE_CASE_ = [sys.executable, """-c""", """\n""".join([load, mock, run])] SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 1 , result.stderr) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '') , ) @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = """ from transformers import AutoModel """ SCREAMING_SNAKE_CASE_ = """ mname = \"hf-internal-testing/test_dynamic_model\" AutoModel.from_pretrained(mname, trust_remote_code=True) print(\"success\") """ # baseline - just load from_pretrained with normal network SCREAMING_SNAKE_CASE_ = [sys.executable, """-c""", """\n""".join([load, run])] # should succeed SCREAMING_SNAKE_CASE_ = self.get_env() SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , 
env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 0 , result.stderr) self.assertIn('success' , result.stdout.decode()) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files SCREAMING_SNAKE_CASE_ = """1""" SCREAMING_SNAKE_CASE_ = subprocess.run(lowerCamelCase_ , env=lowerCamelCase_ , check=lowerCamelCase_ , capture_output=lowerCamelCase_) self.assertEqual(result.returncode , 0 , result.stderr) self.assertIn('success' , result.stdout.decode())
713
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Conversion is done entirely on CPU.
device = torch.device("cpu")


def prepare_img():
    """Download and return the standard COCO test image used for the output sanity check."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for the given SwiftFormer checkpoint name.

    Returns None for an unknown name — callers pass one of the four supported names.
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Return (old_key, new_key) pairs mapping original checkpoint names to HF names."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                # Keys inside a stage look like network.<stage>.<block>.<rest>
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to the HF format, verify it, and save it.

    Args:
        swiftformer_name: one of the four supported checkpoint names (selects the architecture).
        pytorch_dump_folder_path: output directory for the converted model.
        original_ckpt: path or URL of the original checkpoint; loading is skipped when falsy.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    # NOTE(review): loads processor settings from a local "preprocessor_config" path — confirm
    # this directory exists next to the script before running the conversion.
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
0
'''simple docstring'''
from math import factorial


def _UpperCAmelCase(n: int, k: int) -> int:
    """Return C(n, k): the number of ways to choose k items from n without order.

    Raises:
        ValueError: if n < k or k < 0.
    """
    # Validate before calling factorial(), which would raise a less helpful error.
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k')
    return factorial(n) // (factorial(k) * factorial(n - k))


# Public alias used by the demo below (and exported by `import *`,
# which skips the underscore-prefixed name).
combinations = _UpperCAmelCase

if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f'fifty-two card deck is: {combinations(52, 5)}\n',
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f'4 for group projects, there are {combinations(40, 4)} ways',
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f'are {combinations(10, 3)} ways that first, second and',
        "third place can be awarded.",
    )
714
def triangle_number_generator():
    """Yield the triangle numbers n*(n+1)/2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of n via trial-division factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        # Each prime p^m contributes (m + 1) divisor choices.
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining cofactor > 1 is prime with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


# Backwards-compatible alias for the obfuscated entry-point name.
_UpperCAmelCase = solution

if __name__ == "__main__":
    print(solution())
620
0
import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __snake_case ( _UpperCamelCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(_A , 'hidden_sizes')) self.parent.assertTrue(hasattr(_A , 'num_attention_heads')) self.parent.assertTrue(hasattr(_A , 'num_encoder_blocks')) class __snake_case : def __init__( self , _A , _A=13 , _A=64 , _A=3 , _A=4 , _A=[2, 2, 2, 2] , _A=[8, 4, 2, 1] , _A=[16, 32, 64, 128] , _A=[1, 4, 8, 16] , _A=[1, 2, 4, 8] , _A=True , _A=True , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=3 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = num_encoder_blocks SCREAMING_SNAKE_CASE_ = sr_ratios SCREAMING_SNAKE_CASE_ = depths SCREAMING_SNAKE_CASE_ = hidden_sizes SCREAMING_SNAKE_CASE_ = downsampling_rates SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels 
SCREAMING_SNAKE_CASE_ = scope def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = SegformerModel(config=_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation(_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)) SCREAMING_SNAKE_CASE_ = model(_A , labels=_A) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)) self.parent.assertGreater(result.loss , 0.0) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation(config=_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = torch.randint(0 , 1 , (self.batch_size, 
self.image_size, self.image_size)).to(_A) SCREAMING_SNAKE_CASE_ = model(_A , labels=_A) self.parent.assertGreater(result.loss , 0.0) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __lowerCAmelCase : str = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __lowerCAmelCase : Optional[int] = ( { 'feature-extraction': SegformerModel, 'image-classification': SegformerForImageClassification, 'image-segmentation': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : List[Any] = False __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : List[Any] = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = SegformerModelTester(self) SCREAMING_SNAKE_CASE_ = SegformerConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*_A) @unittest.skip('SegFormer does not use inputs_embeds') def lowerCAmelCase__ ( self): pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods') def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(_A) SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A)) SCREAMING_SNAKE_CASE_ = outputs.attentions SCREAMING_SNAKE_CASE_ = sum(self.model_tester.depths) self.assertEqual(len(_A) , _A) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A)) SCREAMING_SNAKE_CASE_ = outputs.attentions self.assertEqual(len(_A) , _A) # verify the first attentions (first block, first layer) SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // 4) ** 2 SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // 32) ** 2 SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]) , 
[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) SCREAMING_SNAKE_CASE_ = len(_A) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A)) self.assertEqual(out_len + 1 , len(_A)) SCREAMING_SNAKE_CASE_ = outputs.attentions self.assertEqual(len(_A) , _A) # verify the first attentions (first block, first layer) SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // 4) ** 2 SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowerCAmelCase__ ( self): def check_hidden_states_output(_A , _A , _A): SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A)) SCREAMING_SNAKE_CASE_ = outputs.hidden_states SCREAMING_SNAKE_CASE_ = self.model_tester.num_encoder_blocks self.assertEqual(len(_A) , _A) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(_A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(_A , _A , _A) def lowerCAmelCase__ ( self): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ 
= True for model_class in self.all_model_classes: if model_class in get_values(_A): continue SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.train() SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A , return_labels=_A) SCREAMING_SNAKE_CASE_ = model(**_A).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def lowerCAmelCase__ ( self): pass @slow def lowerCAmelCase__ ( self): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = SegformerModel.from_pretrained(_A) self.assertIsNotNone(_A) def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self): # only resize + normalize SCREAMING_SNAKE_CASE_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_A , align=_A , do_random_crop=_A) SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to( _A) SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='pt') SCREAMING_SNAKE_CASE_ = encoded_inputs.pixel_values.to(_A) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape , _A) SCREAMING_SNAKE_CASE_ = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _A , atol=1E-4)) @slow def 
lowerCAmelCase__ ( self): # only resize + normalize SCREAMING_SNAKE_CASE_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_A , align=_A , do_random_crop=_A) SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(_A) SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='pt') SCREAMING_SNAKE_CASE_ = encoded_inputs.pixel_values.to(_A) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape , _A) SCREAMING_SNAKE_CASE_ = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _A , atol=1E-1)) @slow def lowerCAmelCase__ ( self): # only resize + normalize SCREAMING_SNAKE_CASE_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_A , align=_A , do_random_crop=_A) SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to( _A) SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='pt') SCREAMING_SNAKE_CASE_ = encoded_inputs.pixel_values.to(_A) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=_A , target_sizes=[(500, 300)]) SCREAMING_SNAKE_CASE_ = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape , _A) SCREAMING_SNAKE_CASE_ = 
image_processor.post_process_semantic_segmentation(outputs=_A) SCREAMING_SNAKE_CASE_ = torch.Size((128, 128)) self.assertEqual(segmentation[0].shape , _A)
715
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline

logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for the packaged JSON loader."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    # Name of a top-level field holding the records when the file is one JSON object.
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads JSON / JSON-Lines files into Arrow tables."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            # Honour the deprecated option by mapping it onto its replacement.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/resolve data files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # Flat data_files -> a single "train" split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs from JSON (field mode) or JSON-Lines (chunked) files."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
620
0
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Time reading `length` examples one by one."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Time reading the whole dataset in slices of `batch_size`."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Time reading `length` examples one by one under the given output format."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Time reading `length` examples in slices of `batch_size` under the given output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time the read functions, and dump the timings as JSON."""
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={'list': (100,)},
        )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # NOTE(review): timing keys reconstructed as "<func> <arg values>" — confirm against
            # the sibling benchmark scripts that read these JSON results.
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
716
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds small TrOCR decoder configs and inputs for the standalone-decoder tests."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for a tiny decoder."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches a full forward pass."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
620
0
"""`datasets-cli convert`: convert a TensorFlow Datasets script to a HuggingFace Datasets script."""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

# Patterns that cannot be converted automatically; the line is wrapped in
# conflict-style markers so a human can resolve it.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory used by argparse `set_defaults(func=...)` to build the command."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand and its arguments on the CLI parser."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible .py file under --tfds_path into --datasets_directory."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
717
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, carrying the pre-quantization latents."""

    # Encoded latents — presumably (batch, latent_channels, h, w); TODO confirm against Encoder
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE model: Encoder -> vector quantizer -> Decoder."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode `x` to latents (before quantization)."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize latents (unless `force_not_quantize`) and decode to a sample."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # spatial norm decoders also consume the quantized latents as conditioning
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
620
0
"""Convert a fairseq Wav2Vec2 + Speech2Text2 checkpoint into a HuggingFace SpeechEncoderDecoderModel."""
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq state-dict key fragments to HF module paths; "*" stands for the layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` attribute-by-attribute into `hf_pointer` and copy `value` in place."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy all fairseq encoder weights into `hf_model`; return the encoder->decoder projection if present."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # layer index sits two dots before the matched fragment
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm weight; unmatched names go to `unused_weights`."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding matrix's weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Build a token->id vocab from a fairseq dict file, reserving ids 0-3 for specials."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
718
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

# NOTE(review): the original import list named `is_abit_bnb_available` twice (the 4-bit and
# 8-bit helpers had been collapsed into one mangled name). Restored to the two distinct
# functions that `accelerate.utils.imports` actually exports.
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize a model's linear layers to 4-bit or 8-bit with bitsandbytes and dispatch it.

    If `model` lives on the ``meta`` device, weights are loaded from `weights_location`
    after the linear layers are swapped; otherwise the already-materialized model is
    quantized in place (with a warning, since that path is not recommended).

    Returns the quantized (and possibly dispatched) model. Raises ImportError/ValueError
    when the installed `bitsandbytes` does not support the requested precision, and
    RuntimeError when no GPU is available or `weights_location` is missing for a meta model.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    # strip the parameter suffix so we can fetch the owning module/attribute
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """Resolve (or validate) a device map for a quantized model.

    Accepts either a string policy ("auto", "balanced", "balanced_low_0", "sequential")
    or an explicit dict; rejects dispatching quantized 4-bit modules to cpu/disk.
    """
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        # skipped modules stay in the config's torch_dtype
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        # explicitly protected modules stay in float32
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every eligible `nn.Linear` in `model` with a bitsandbytes quantized linear.

    Thin public wrapper around `_replace_with_bnb_layers` that warns when nothing
    was replaced (e.g. architectures using Conv1D instead of Linear).
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    """Recursively swap `nn.Linear` children for `bnb.nn.Linear8bitLt` / `bnb.nn.Linear4bit`.

    Returns `(model, has_been_replaced)` where the flag reports whether any layer
    anywhere in the subtree was converted.
    """
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    """Return module names that should stay unquantized (tied weights and the output head)."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Return True if the model contains at least one `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    """Device of the module's first parameter (proxy for the whole module's device)."""
    return next(parameter.parameters()).device


def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Offload one (possibly quantized) parameter to disk and replace it with a meta tensor.

    NOTE(review): the original identifiers were mangled; the nesting of the traversal
    below (top-level vs. inside the `if fp16_statistics is None` branch) was reconstructed
    from statement order — confirm against the upstream accelerate implementation.
    """
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
    tensor_name = param_name
    module = model
    if "." in tensor_name:
        # walk down dotted names to the owning submodule
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"""{module} has no attribute {split}.""")
            module = new_module
        tensor_name = splits[-1]
    # offload weights
    module._parameters[tensor_name].requires_grad = False
    offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
    if hasattr(module._parameters[tensor_name], "SCB"):
        # 8-bit params carry their scaling stats in `.SCB`; offload them alongside the weight
        offload_weight(
            module._parameters[tensor_name].SCB,
            param_name.replace("weight", "SCB"),
            offload_folder,
            index=offload_index,
        )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(module, tensor_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
620
0
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

# Module-level RNG used by `floats_list` when no explicit rng is passed.
# (The mangled original assigned this to a different name than the one read below.)
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    """Builds TvltFeatureExtractor configs and synthetic inputs for the common test mixin."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step so that batch inputs span [min_seq_length, max_seq_length] evenly
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Kwargs dict used to instantiate the feature extractor under test."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Synthesize a batch of float speech inputs (optionally equal-length / numpy)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # mel filters are float arrays: compare with allclose, the rest exactly
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
719
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a byte -> printable unicode character mapping used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are shifted into the
    256+ range so every byte has a visible, non-whitespace representative.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style), with RoBERTa-style special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; results are memoized."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # lowest-rank pair merges first; unknown pairs rank as +inf
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # fix: the mangled source had `key=lambda _A: kv[1]` (undefined `kv`)
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
0
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def _UpperCAmelCase ( *_SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" if not isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ = list(__a ) for i in range(len(__a ) ): SCREAMING_SNAKE_CASE_ = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Exception ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [ 'CUDA out of memory.', # CUDA OOM 'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU 'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM ] if isinstance(__a , __a ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 128 ): """simple docstring""" if function is None: return functools.partial(__a , starting_batch_size=__a ) SCREAMING_SNAKE_CASE_ = starting_batch_size def decorator(*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : List[str] ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE_ = list(inspect.signature(__a ).parameters.keys() ) # Guard against user error if len(__a ) < (len(__a ) + 1): SCREAMING_SNAKE_CASE_ = ', '.join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f"""Batch size was passed into `{function.__name__}` as the first argument when called.""" f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" ) while True: if batch_size == 0: raise RuntimeError('No executable batch size found, reached zero.' 
) try: return function(__a , *__a , **__a ) except Exception as e: if should_reduce_batch_size(__a ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
720
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'dpr' def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ): super().__init__(pad_token_id=_A , **_A) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = position_embedding_type
620
0
from __future__ import annotations def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , ): if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
721
import pytest import datasets # Import fixture modules as plugins UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' ) def _UpperCAmelCase ( ): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _UpperCAmelCase ( 
_SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
620
0
import torch def _UpperCAmelCase ( ): """simple docstring""" if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = torch.cuda.device_count() else: SCREAMING_SNAKE_CASE_ = 0 print(f"""Successfully ran on {num_gpus} GPUs""" ) if __name__ == "__main__": main()
700
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
0
from __future__ import annotations UpperCamelCase__ : Tuple = tuple[int, int, int] UpperCamelCase__ : Optional[int] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase UpperCamelCase__ : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # -------------------------- default selection -------------------------- # rotors -------------------------- UpperCamelCase__ : List[str] = "EGZWVONAHDCLFQMSIPJBYUKXTR" UpperCamelCase__ : Tuple = "FOBHMDKEXQNRAULPGSJVTYICZW" UpperCamelCase__ : str = "ZJXESIUQLHAVRMDOYGTNFWPBKC" # reflector -------------------------- UpperCamelCase__ : Optional[Any] = { "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M", } # -------------------------- extra rotors -------------------------- UpperCamelCase__ : int = "RMDJXFUWGISLHVTCQNKYPBEZOA" UpperCamelCase__ : List[str] = "SGLCPQWZHKXAREONTFBVIYJUDM" UpperCamelCase__ : List[Any] = "HVSICLTYKQUBXDWAJZOMFGPREN" UpperCamelCase__ : Dict = "RZWQHFMVDBKICJLNTUXAGYPSOE" UpperCamelCase__ : str = "LFKIJODBEGAMQPXVUHYSTCZRWN" UpperCamelCase__ : List[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS" def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" if (unique_rotsel := len(set(_lowerCAmelCase ) )) < 3: SCREAMING_SNAKE_CASE_ = f"""Please use 3 unique rotors (not {unique_rotsel})""" raise Exception(_lowerCAmelCase ) # Checks if rotor positions are valid SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = rotpos if not 0 < rotorposa <= len(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = f"""First rotor position is not within range of 1..26 ({rotorposa}""" raise ValueError(_lowerCAmelCase ) if not 0 < rotorposa <= len(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = f"""Second 
rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(_lowerCAmelCase ) if not 0 < rotorposa <= len(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = f"""Third rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(_lowerCAmelCase ) # Validates string and returns dict SCREAMING_SNAKE_CASE_ = _plugboard(_lowerCAmelCase ) return rotpos, rotsel, pbdict def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = f"""Plugboard setting isn't type string ({type(_lowerCAmelCase )})""" raise TypeError(_lowerCAmelCase ) elif len(_lowerCAmelCase ) % 2 != 0: SCREAMING_SNAKE_CASE_ = f"""Odd number of symbols ({len(_lowerCAmelCase )})""" raise Exception(_lowerCAmelCase ) elif pbstring == "": return {} pbstring.replace(' ' , '' ) # Checks if all characters are unique SCREAMING_SNAKE_CASE_ = set() for i in pbstring: if i not in abc: SCREAMING_SNAKE_CASE_ = f"""'{i}' not in list of symbols""" raise Exception(_lowerCAmelCase ) elif i in tmppbl: SCREAMING_SNAKE_CASE_ = f"""Duplicate symbol ({i})""" raise Exception(_lowerCAmelCase ) else: tmppbl.add(_lowerCAmelCase ) del tmppbl # Created the dictionary SCREAMING_SNAKE_CASE_ = {} for j in range(0 , len(_lowerCAmelCase ) - 1 , 2 ): SCREAMING_SNAKE_CASE_ = pbstring[j + 1] SCREAMING_SNAKE_CASE_ = pbstring[j] return pb def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE : int = "" , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = text.upper() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _validator( _lowerCAmelCase , _lowerCAmelCase , plugb.upper() ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = rotor_position SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = rotor_selection rotorposa -= 1 rotorposa 
-= 1 rotorposa -= 1 SCREAMING_SNAKE_CASE_ = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: SCREAMING_SNAKE_CASE_ = plugboard[symbol] # rotor ra -------------------------- SCREAMING_SNAKE_CASE_ = abc.index(_lowerCAmelCase ) + rotorposa SCREAMING_SNAKE_CASE_ = rotora[index % len(_lowerCAmelCase )] # rotor rb -------------------------- SCREAMING_SNAKE_CASE_ = abc.index(_lowerCAmelCase ) + rotorposa SCREAMING_SNAKE_CASE_ = rotora[index % len(_lowerCAmelCase )] # rotor rc -------------------------- SCREAMING_SNAKE_CASE_ = abc.index(_lowerCAmelCase ) + rotorposa SCREAMING_SNAKE_CASE_ = rotora[index % len(_lowerCAmelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher SCREAMING_SNAKE_CASE_ = reflector[symbol] # 2nd rotors SCREAMING_SNAKE_CASE_ = abc[rotora.index(_lowerCAmelCase ) - rotorposa] SCREAMING_SNAKE_CASE_ = abc[rotora.index(_lowerCAmelCase ) - rotorposa] SCREAMING_SNAKE_CASE_ = abc[rotora.index(_lowerCAmelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: SCREAMING_SNAKE_CASE_ = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = 0 rotorposa += 1 if rotorposa >= len(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = 0 rotorposa += 1 if rotorposa >= len(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_lowerCAmelCase ) return "".join(_lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase__ : List[str] = "This is my Python script that emulates the Enigma machine from WWII." 
UpperCamelCase__ : Optional[Any] = (1, 1, 1) UpperCamelCase__ : Dict = "pictures" UpperCamelCase__ : Optional[Any] = (rotora, rotora, rotora) UpperCamelCase__ : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb) print("Encrypted message:", en) print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
701
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Any = 'biogpt' def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ): SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = scale_embedding SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = layerdrop SCREAMING_SNAKE_CASE_ = activation_dropout super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
620
0
from __future__ import annotations from math import pi def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ): """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if inductance < 0: raise ValueError('Inductance cannot be negative' ) if frequency < 0: raise ValueError('Frequency cannot be negative' ) if reactance < 0: raise ValueError('Inductive reactance cannot be negative' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
702
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __snake_case ( lowerCAmelCase__ ): def __init__( self , _A , _A , _A , _A = None , ): super().__init__() self.register_modules(transformer=_A , vae=_A , scheduler=_A) # create a imagenet -> id dictionary for easier use SCREAMING_SNAKE_CASE_ = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(','): SCREAMING_SNAKE_CASE_ = int(_A) SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items())) def lowerCAmelCase__ ( self , _A): if not isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = list(_A) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""") return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ): SCREAMING_SNAKE_CASE_ = len(_A) SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels SCREAMING_SNAKE_CASE_ = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , ) SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1) SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device) SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_A) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2] SCREAMING_SNAKE_CASE_ = 
torch.cat([half, half] , dim=0) SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A) SCREAMING_SNAKE_CASE_ = t if not torch.is_tensor(_A): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps' if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa else: SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device) elif len(timesteps.shape) == 0: SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output SCREAMING_SNAKE_CASE_ = self.transformer( _A , timestep=_A , class_labels=_A).sample # perform guidance if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0) SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps) SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0) SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1) else: SCREAMING_SNAKE_CASE_ = noise_pred # compute previous image: x_t -> x_t-1 SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0) else: SCREAMING_SNAKE_CASE_ = latent_model_input SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents SCREAMING_SNAKE_CASE_ = 
self.vae.decode(_A).sample SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A) if not return_dict: return (samples,) return ImagePipelineOutput(images=_A)
620
0