Dataset schema (each preview row below carries these five fields):

    column                    dtype    range
    code                      string   lengths 81 to 54k
    code_codestyle            int64    0 to 721
    style_context             string   lengths 91 to 41.9k
    style_context_codestyle   int64    0 to 699
    label                     int64    0 to 1
code:

import random


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: list, _SCREAMING_SNAKE_CASE: str):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = [], [], []
    for element in data:
        if element < pivot:
            less.append(_SCREAMING_SNAKE_CASE)
        elif element > pivot:
            greater.append(_SCREAMING_SNAKE_CASE)
        else:
            equal.append(_SCREAMING_SNAKE_CASE)
    return less, equal, greater


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: list, _SCREAMING_SNAKE_CASE: int):
    """simple docstring"""
    if index >= len(_SCREAMING_SNAKE_CASE) or index < 0:
        return None
    SCREAMING_SNAKE_CASE_ = items[random.randint(0, len(_SCREAMING_SNAKE_CASE) - 1)]
    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = _partition(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
    # must be in larger
    else:
        return quick_select(_SCREAMING_SNAKE_CASE, index - (m + count))
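For orientation, a minimal de-obfuscated sketch of the quick-select logic in the sample above. The names `_partition` and `quick_select` are taken from the call sites the obfuscation left intact; everything else is an illustrative reconstruction, not part of the dataset row.

import random


def _partition(data: list, pivot):
    """Split data into elements less than, equal to, and greater than pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items (0-based), or None if out of range."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    if m <= index < m + count:   # the answer is the pivot itself
        return pivot
    elif m > index:              # the answer lies among the smaller elements
        return quick_select(smaller, index)
    else:                        # the answer lies among the larger elements
        return quick_select(larger, index - (m + count))


print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))  # 5th smallest element -> 54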
code_codestyle: 715
style_context:

import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


UpperCamelCase__: Optional[int] = datasets.utils.logging.get_logger(__name__)


@dataclass
class __snake_case(datasets.BuilderConfig):
    __lowerCAmelCase: Optional[datasets.Features] = None
    __lowerCAmelCase: str = "utf-8"
    __lowerCAmelCase: Optional[str] = None
    __lowerCAmelCase: Optional[str] = None
    __lowerCAmelCase: bool = True  # deprecated
    __lowerCAmelCase: Optional[int] = None  # deprecated
    __lowerCAmelCase: int = 10 << 20  # 10MB
    __lowerCAmelCase: Optional[bool] = None


class __snake_case(datasets.ArrowBasedBuilder):
    __lowerCAmelCase: int = JsonConfig

    def lowerCAmelCase__(self):
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            SCREAMING_SNAKE_CASE_ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)

    def lowerCAmelCase__(self, _A):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(_A, (str, list, tuple)):
            SCREAMING_SNAKE_CASE_ = data_files
            if isinstance(_A, _A):
                SCREAMING_SNAKE_CASE_ = [files]
            SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        SCREAMING_SNAKE_CASE_ = []
        for split_name, files in data_files.items():
            if isinstance(_A, _A):
                SCREAMING_SNAKE_CASE_ = [files]
            SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files]
            splits.append(datasets.SplitGenerator(name=_A, gen_kwargs={'files': files}))
        return splits

    def lowerCAmelCase__(self, _A):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type
                SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A, pa.array([None] * len(_A), type=_A))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            SCREAMING_SNAKE_CASE_ = table_cast(_A, self.config.features.arrow_schema)
        return pa_table

    def lowerCAmelCase__(self, _A):
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(_A, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    SCREAMING_SNAKE_CASE_ = json.load(_A)
                # We keep only the field we are interested in
                SCREAMING_SNAKE_CASE_ = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(_A, (list, tuple)):
                    SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset])
                    SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys}
                else:
                    SCREAMING_SNAKE_CASE_ = dataset
                SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A)
                yield file_idx, self._cast_table(_A)
            # If the file has one json object per line
            else:
                with open(_A, 'rb') as f:
                    SCREAMING_SNAKE_CASE_ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32, 16 << 10)
                    SCREAMING_SNAKE_CASE_ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(_A)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding, errors=_A).encode('utf-8')
                        try:
                            while True:
                                try:
                                    SCREAMING_SNAKE_CASE_ = paj.read_json(
                                        io.BytesIO(_A), read_options=paj.ReadOptions(block_size=_A))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(_A, pa.ArrowInvalid)
                                        and "straddling" not in str(_A)
                                        or block_size > len(_A)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                        _A, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    SCREAMING_SNAKE_CASE_ = json.load(_A)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(_A, _A):  # list is the only sequence type supported in JSON
                                try:
                                    SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset])
                                    SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys}
                                    SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(_A)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(_A)
                        batch_idx += 1
style_context_codestyle: 620
label: 0
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


UpperCamelCase__: Any = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__: Union[str, Any] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__: Any = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    UpperCamelCase__: str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 716
style_context:

import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class __snake_case:
    def __init__(
        self,
        _A,
        _A=99,
        _A=13,
        _A=16,
        _A=7,
        _A=True,
        _A=True,
        _A=True,
        _A=False,
        _A=True,
        _A=2,
        _A=32,
        _A=4,
        _A=4,
        _A=30,
        _A=0,
        _A=1,
        _A=2,
        _A=None,
    ):
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = decoder_seq_length
        # For common tests
        SCREAMING_SNAKE_CASE_ = self.decoder_seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_attention_mask
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = d_model
        SCREAMING_SNAKE_CASE_ = d_model
        SCREAMING_SNAKE_CASE_ = decoder_layers
        SCREAMING_SNAKE_CASE_ = decoder_layers
        SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
        SCREAMING_SNAKE_CASE_ = decoder_attention_heads
        SCREAMING_SNAKE_CASE_ = decoder_attention_heads
        SCREAMING_SNAKE_CASE_ = eos_token_id
        SCREAMING_SNAKE_CASE_ = bos_token_id
        SCREAMING_SNAKE_CASE_ = pad_token_id
        SCREAMING_SNAKE_CASE_ = decoder_start_token_id
        SCREAMING_SNAKE_CASE_ = use_cache
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = decoder_seq_length
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = 1

    def lowerCAmelCase__(self):
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        SCREAMING_SNAKE_CASE_ = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        SCREAMING_SNAKE_CASE_ = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )
        return (config, input_ids, attention_mask, lm_labels)

    def lowerCAmelCase__(self, _A, _A, _A, _A):
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval()
        SCREAMING_SNAKE_CASE_ = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        SCREAMING_SNAKE_CASE_ = model(_A, use_cache=_A)
        SCREAMING_SNAKE_CASE_ = model(_A)
        SCREAMING_SNAKE_CASE_ = model(_A, use_cache=_A)
        self.parent.assertTrue(len(_A) == len(_A))
        self.parent.assertTrue(len(_A) == len(_A) + 1)
        SCREAMING_SNAKE_CASE_ = outputs['past_key_values']
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens], dim=-1)
        SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state']
        SCREAMING_SNAKE_CASE_ = model(_A, past_key_values=_A)['last_hidden_state']
        # select random slice
        SCREAMING_SNAKE_CASE_ = ids_tensor((1,), output_from_past.shape[-1]).item()
        SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(_A, _A, atol=1E-3)

    def lowerCAmelCase__(self):
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_torch
class __snake_case(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, unittest.TestCase):
    __lowerCAmelCase: Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    __lowerCAmelCase: Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
    __lowerCAmelCase: str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    __lowerCAmelCase: Any = True
    __lowerCAmelCase: str = False

    def lowerCAmelCase__(self):
        SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self, is_training=_A)
        SCREAMING_SNAKE_CASE_ = ConfigTester(self, config_class=_A)

    def lowerCAmelCase__(self):
        pass

    def lowerCAmelCase__(self):
        pass

    def lowerCAmelCase__(self):
        pass

    def lowerCAmelCase__(self):
        self.config_tester.run_common_tests()

    def lowerCAmelCase__(self):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*_A)

    def lowerCAmelCase__(self):
        return

    @unittest.skip('The model doesn\'t support left padding')  # and it's not used enough to be worth fixing :)
    def lowerCAmelCase__(self):
        pass
style_context_codestyle: 620
label: 0
code:

from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


UpperCamelCase__: str = logging.get_logger(__name__)

UpperCamelCase__: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: List[DatasetType],
    _SCREAMING_SNAKE_CASE: Optional[List[float]] = None,
    _SCREAMING_SNAKE_CASE: Optional[int] = None,
    _SCREAMING_SNAKE_CASE: Optional[DatasetInfo] = None,
    _SCREAMING_SNAKE_CASE: Optional[NamedSplit] = None,
    _SCREAMING_SNAKE_CASE: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(_SCREAMING_SNAKE_CASE):
        if not isinstance(_SCREAMING_SNAKE_CASE, (Dataset, IterableDataset)):
            if isinstance(_SCREAMING_SNAKE_CASE, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(_SCREAMING_SNAKE_CASE)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_SCREAMING_SNAKE_CASE))}']""")
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_SCREAMING_SNAKE_CASE).__name__}.""")
        if i == 0:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = (
                (Dataset, IterableDataset) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else (IterableDataset, Dataset)
            )
        elif not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, info=_SCREAMING_SNAKE_CASE, split=_SCREAMING_SNAKE_CASE, stopping_strategy=_SCREAMING_SNAKE_CASE)
    else:
        return _interleave_iterable_datasets(
            _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, info=_SCREAMING_SNAKE_CASE, split=_SCREAMING_SNAKE_CASE, stopping_strategy=_SCREAMING_SNAKE_CASE)


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: List[DatasetType],
    _SCREAMING_SNAKE_CASE: Optional[DatasetInfo] = None,
    _SCREAMING_SNAKE_CASE: Optional[NamedSplit] = None,
    _SCREAMING_SNAKE_CASE: int = 0,
):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(_SCREAMING_SNAKE_CASE):
        if not isinstance(_SCREAMING_SNAKE_CASE, (Dataset, IterableDataset)):
            if isinstance(_SCREAMING_SNAKE_CASE, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(_SCREAMING_SNAKE_CASE)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_SCREAMING_SNAKE_CASE))}']""")
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_SCREAMING_SNAKE_CASE).__name__}.""")
        if i == 0:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = (
                (Dataset, IterableDataset) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else (IterableDataset, Dataset)
            )
        elif not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(_SCREAMING_SNAKE_CASE, info=_SCREAMING_SNAKE_CASE, split=_SCREAMING_SNAKE_CASE, axis=_SCREAMING_SNAKE_CASE)
    else:
        return _concatenate_iterable_datasets(_SCREAMING_SNAKE_CASE, info=_SCREAMING_SNAKE_CASE, split=_SCREAMING_SNAKE_CASE, axis=_SCREAMING_SNAKE_CASE)
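The two routines above are recognizably the internals behind the public `datasets.interleave_datasets` and `datasets.concatenate_datasets` functions. A short usage sketch of that public API (the toy datasets here are illustrative):

from datasets import Dataset, concatenate_datasets, interleave_datasets

ds1 = Dataset.from_dict({"text": ["a", "b", "c"]})
ds2 = Dataset.from_dict({"text": ["x", "y"]})

# Alternate between the two datasets, sampling ds1 twice as often,
# and stop only once every dataset has been fully consumed.
mixed = interleave_datasets(
    [ds1, ds2],
    probabilities=[2 / 3, 1 / 3],
    seed=42,
    stopping_strategy="all_exhausted",
)

# Stack the rows of both datasets end to end (axis=0 is the default).
combined = concatenate_datasets([ds1, ds2])
print(len(combined))  # 5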
code_codestyle: 717
style_context:

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class __snake_case(lowerCAmelCase__):
    __lowerCAmelCase: torch.FloatTensor


class __snake_case(lowerCAmelCase__, lowerCAmelCase__):
    @register_to_config
    def __init__(
        self,
        _A=3,
        _A=3,
        _A=("DownEncoderBlock2D",),
        _A=("UpDecoderBlock2D",),
        _A=(64,),
        _A=1,
        _A="silu",
        _A=3,
        _A=32,
        _A=256,
        _A=32,
        _A=None,
        _A=0.1_8_2_1_5,
        _A="group",
    ):
        super().__init__()
        # pass init params to Encoder
        SCREAMING_SNAKE_CASE_ = Encoder(
            in_channels=_A,
            out_channels=_A,
            down_block_types=_A,
            block_out_channels=_A,
            layers_per_block=_A,
            act_fn=_A,
            norm_num_groups=_A,
            double_z=_A,
        )
        SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A, _A, 1)
        SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A, _A, beta=0.2_5, remap=_A, sane_index_shape=_A)
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A, _A, 1)
        # pass init params to Decoder
        SCREAMING_SNAKE_CASE_ = Decoder(
            in_channels=_A,
            out_channels=_A,
            up_block_types=_A,
            block_out_channels=_A,
            layers_per_block=_A,
            act_fn=_A,
            norm_num_groups=_A,
            norm_type=_A,
        )

    @apply_forward_hook
    def lowerCAmelCase__(self, _A, _A=True):
        SCREAMING_SNAKE_CASE_ = self.encoder(_A)
        SCREAMING_SNAKE_CASE_ = self.quant_conv(_A)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=_A)

    @apply_forward_hook
    def lowerCAmelCase__(self, _A, _A=False, _A=True):
        # also go through quantization layer
        if not force_not_quantize:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = self.quantize(_A)
        else:
            SCREAMING_SNAKE_CASE_ = h
        SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A)
        SCREAMING_SNAKE_CASE_ = self.decoder(_A, quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)

    def lowerCAmelCase__(self, _A, _A=True):
        SCREAMING_SNAKE_CASE_ = sample
        SCREAMING_SNAKE_CASE_ = self.encode(_A).latents
        SCREAMING_SNAKE_CASE_ = self.decode(_A).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)
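This context sample appears to be the diffusers VQModel (a VQ-VAE) with obfuscated identifiers. A hedged usage sketch of the public class it corresponds to; the repo id and subfolder are assumptions based on the standard LDM checkpoint layout:

import torch
from diffusers import VQModel

# Load the VQ-VAE component of a latent diffusion checkpoint (assumed repo layout).
model = VQModel.from_pretrained("CompVis/ldm-celebahq-256", subfolder="vqvae")

x = torch.randn(1, 3, 256, 256)  # dummy image batch
with torch.no_grad():
    latents = model.encode(x).latents          # encoder + pre-quant conv
    reconstruction = model.decode(latents).sample  # quantize, post-quant conv, decoder
print(reconstruction.shape)  # expected torch.Size([1, 3, 256, 256])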
style_context_codestyle: 620
label: 0
code:

from __future__ import annotations

import math

UpperCamelCase__: Optional[int] = "2020.9.26"
UpperCamelCase__: str = "xcodz-dot, cclaus, dhruvmanila"


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: float):
    """simple docstring"""
    if not all(isinstance(_SCREAMING_SNAKE_CASE, (float, int)) for val in locals().values()):
        SCREAMING_SNAKE_CASE_ = f"""Input values must either be float or int: {list(locals().values())}"""
        raise TypeError(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = ((x * distance) / (z + distance)) * scale
    SCREAMING_SNAKE_CASE_ = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: float, _SCREAMING_SNAKE_CASE: str, _SCREAMING_SNAKE_CASE: float):
    """simple docstring"""
    if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        raise TypeError('Axis must be a str')
    SCREAMING_SNAKE_CASE_ = locals()
    del input_variables["axis"]
    if not all(isinstance(_SCREAMING_SNAKE_CASE, (float, int)) for val in input_variables.values()):
        SCREAMING_SNAKE_CASE_ = (
            'Input values except axis must either be float or int: '
            f"""{list(input_variables.values())}"""
        )
        raise TypeError(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        SCREAMING_SNAKE_CASE_ = x * math.cos(_SCREAMING_SNAKE_CASE) - y * math.sin(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = y * math.cos(_SCREAMING_SNAKE_CASE) + x * math.sin(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = z
    elif axis == "x":
        SCREAMING_SNAKE_CASE_ = y * math.cos(_SCREAMING_SNAKE_CASE) - z * math.sin(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = z * math.cos(_SCREAMING_SNAKE_CASE) + y * math.sin(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = x
    elif axis == "y":
        SCREAMING_SNAKE_CASE_ = x * math.cos(_SCREAMING_SNAKE_CASE) - z * math.sin(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = z * math.cos(_SCREAMING_SNAKE_CASE) + x * math.sin(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'')
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
    print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
code_codestyle: 718
style_context:

import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_abit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


UpperCamelCase__: Optional[int] = logging.getLogger(__name__)


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: torch.nn.Module,
    _SCREAMING_SNAKE_CASE: BnbQuantizationConfig,
    _SCREAMING_SNAKE_CASE: Union[str, os.PathLike] = None,
    _SCREAMING_SNAKE_CASE: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    _SCREAMING_SNAKE_CASE: Optional[List[str]] = None,
    _SCREAMING_SNAKE_CASE: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    _SCREAMING_SNAKE_CASE: Optional[Union[str, os.PathLike]] = None,
    _SCREAMING_SNAKE_CASE: bool = False,
):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.')
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.')
    SCREAMING_SNAKE_CASE_ = []
    # custom device map
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) and len(device_map.keys()) > 1:
        SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE)
    # compatibility with peft
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.')
        SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, modules_to_not_convert=_SCREAMING_SNAKE_CASE)
        # convert param to the right dtype
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
                param.to(torch.floataa)
                if param.dtype != torch.floataa:
                    SCREAMING_SNAKE_CASE_ = name.replace('.weight', '').replace('.bias', '')
                    SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                    if param is not None:
                        param.to(torch.floataa)
            elif torch.is_floating_point(_SCREAMING_SNAKE_CASE):
                param.to(_SCREAMING_SNAKE_CASE)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.')
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """)
    else:
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, modules_to_not_convert=_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map(
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            max_memory=_SCREAMING_SNAKE_CASE,
            no_split_module_classes=_SCREAMING_SNAKE_CASE,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=_SCREAMING_SNAKE_CASE,
            offload_state_dict=_SCREAMING_SNAKE_CASE,
            keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules,
            offload_abit_bnb=load_in_abit and offload,
        )
        return dispatch_model(_SCREAMING_SNAKE_CASE, device_map=_SCREAMING_SNAKE_CASE, offload_dir=_SCREAMING_SNAKE_CASE)


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Tuple, _SCREAMING_SNAKE_CASE: Optional[Any], _SCREAMING_SNAKE_CASE: List[str]=None, _SCREAMING_SNAKE_CASE: List[str]=None, _SCREAMING_SNAKE_CASE: Union[str, Any]=None):
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.')
        SCREAMING_SNAKE_CASE_ = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            })
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
            })
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = special_dtypes
        SCREAMING_SNAKE_CASE_ = no_split_module_classes
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            SCREAMING_SNAKE_CASE_ = get_balanced_memory(
                _SCREAMING_SNAKE_CASE,
                low_zero=(device_map == 'balanced_low_0'),
                max_memory=_SCREAMING_SNAKE_CASE,
                **_SCREAMING_SNAKE_CASE,
            )
        SCREAMING_SNAKE_CASE_ = max_memory
        SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        # check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        SCREAMING_SNAKE_CASE_ = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ')
                else:
                    logger.info(
                        'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit')
        del device_map_without_some_modules
    return device_map


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: int, _SCREAMING_SNAKE_CASE: int=None, _SCREAMING_SNAKE_CASE: Union[str, Any]=None):
    """simple docstring"""
    if modules_to_not_convert is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.')
    return model


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: Tuple, _SCREAMING_SNAKE_CASE: Optional[Any]=None, _SCREAMING_SNAKE_CASE: str=None):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = False
    for name, module in model.named_children():
        if current_key_name is None:
            SCREAMING_SNAKE_CASE_ = []
        current_key_name.append(_SCREAMING_SNAKE_CASE)
        if isinstance(_SCREAMING_SNAKE_CASE, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = True
            for key in modules_to_not_convert:
                if ((key in current_key_name_str) and (key + "." in current_key_name_str)) or key == current_key_name_str:
                    SCREAMING_SNAKE_CASE_ = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fpaa_weights=_SCREAMING_SNAKE_CASE,
                        threshold=bnb_quantization_config.llm_inta_threshold,
                    )
                elif bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_abit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_abit_quant_type,
                    )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                SCREAMING_SNAKE_CASE_ = module.weight.data
                if module.bias is not None:
                    SCREAMING_SNAKE_CASE_ = module.bias.data
                bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE)
                setattr(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                SCREAMING_SNAKE_CASE_ = True
        if len(list(module.children())) > 0:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Union[str, Any]):
    """simple docstring"""
    with init_empty_weights():
        SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE)
    # For compatibility with Accelerate < 0.18
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE, [])
    SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE) > 0
    # Check if it is a base model
    SCREAMING_SNAKE_CASE_ = False
    if hasattr(_SCREAMING_SNAKE_CASE, 'base_model_prefix'):
        SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    SCREAMING_SNAKE_CASE_ = list(model.named_children())
    SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
    # add last module together with tied weights
    SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE) - set(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE)) + list(_SCREAMING_SNAKE_CASE)
    # remove ".weight" from the keys
    SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
    SCREAMING_SNAKE_CASE_ = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE, '')
        filtered_module_names.append(_SCREAMING_SNAKE_CASE)
    return filtered_module_names


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Dict):
    """simple docstring"""
    for m in model.modules():
        if isinstance(_SCREAMING_SNAKE_CASE, bnb.nn.Linearabit):
            return True
    return False


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: nn.Module):
    """simple docstring"""
    return next(parameter.parameters()).device


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: str, _SCREAMING_SNAKE_CASE: Tuple, _SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: str, _SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: str, _SCREAMING_SNAKE_CASE: str):
    """simple docstring"""
    if fpaa_statistics is None:
        set_module_tensor_to_device(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, 0, dtype=_SCREAMING_SNAKE_CASE, value=_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = param_name
        SCREAMING_SNAKE_CASE_ = model
        if "." in tensor_name:
            SCREAMING_SNAKE_CASE_ = tensor_name.split('.')
            for split in splits[:-1]:
                SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                SCREAMING_SNAKE_CASE_ = new_module
            SCREAMING_SNAKE_CASE_ = splits[-1]
        # offload weights
        SCREAMING_SNAKE_CASE_ = False
        offload_weight(module._parameters[tensor_name], _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, index=_SCREAMING_SNAKE_CASE)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace('weight', 'SCB'),
                _SCREAMING_SNAKE_CASE,
                index=_SCREAMING_SNAKE_CASE,
            )
    else:
        offload_weight(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, index=_SCREAMING_SNAKE_CASE)
        offload_weight(_SCREAMING_SNAKE_CASE, param_name.replace('weight', 'SCB'), _SCREAMING_SNAKE_CASE, index=_SCREAMING_SNAKE_CASE)
    set_module_tensor_to_device(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, 'meta', dtype=_SCREAMING_SNAKE_CASE, value=torch.empty(*param.size()))
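This context sample corresponds to accelerate's bitsandbytes quantization utilities. A hedged sketch of the public entry point it implements, assuming a GPU, an installed `bitsandbytes`, and a real checkpoint folder in place of the placeholder path:

import torch.nn as nn
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

# Instantiate the architecture on the meta device so no real weights are allocated.
with init_empty_weights():
    empty_model = nn.Sequential(nn.Linear(1024, 4096), nn.ReLU(), nn.Linear(4096, 1024))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint_folder",  # placeholder: folder holding the weights
    device_map="auto",
)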
style_context_codestyle: 620
label: 0
code:

# flake8: noqa
# Lint as: python3

from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


UpperCamelCase__: List[Any] = logging.get_logger(__name__)

UpperCamelCase__: Dict[Optional[str], Type[Formatter]] = {}
UpperCamelCase__: Dict[Optional[str], str] = {}
UpperCamelCase__: Dict[Optional[str], Exception] = {}


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: type, _SCREAMING_SNAKE_CASE: Optional[str], _SCREAMING_SNAKE_CASE: Optional[List[str]] = None):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""")
    SCREAMING_SNAKE_CASE_ = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""")
        SCREAMING_SNAKE_CASE_ = format_type


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Exception, _SCREAMING_SNAKE_CASE: Optional[str], _SCREAMING_SNAKE_CASE: Optional[List[str]] = None):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        SCREAMING_SNAKE_CASE_ = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    UpperCamelCase__: str = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    UpperCamelCase__: Dict = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    UpperCamelCase__: Union[str, Any] = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Optional[str]):
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Optional[str], **_SCREAMING_SNAKE_CASE: Optional[int]):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = get_format_type_from_alias(_SCREAMING_SNAKE_CASE)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**_SCREAMING_SNAKE_CASE)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'""")
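The registry above is what `Dataset.set_format` resolves against. A brief usage sketch of that public API (the toy dataset is illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
ds.set_format("np")      # "np" is resolved to "numpy" through the alias table above
print(type(ds[0]["x"]))  # expected: <class 'numpy.ndarray'>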
code_codestyle: 719
style_context:

import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


UpperCamelCase__: Union[str, Any] = logging.get_logger(__name__)

UpperCamelCase__: Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__: List[str] = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

UpperCamelCase__: str = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


@lru_cache()
def _UpperCAmelCase():
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    SCREAMING_SNAKE_CASE_ = bs[:]
    SCREAMING_SNAKE_CASE_ = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(_SCREAMING_SNAKE_CASE)
            cs.append(2**8 + n)
            n += 1
    SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE) for n in cs]
    return dict(zip(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE))


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: List[str]):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = set()
    SCREAMING_SNAKE_CASE_ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        SCREAMING_SNAKE_CASE_ = char
    return pairs


class __snake_case(lowerCAmelCase__):
    __lowerCAmelCase: str = VOCAB_FILES_NAMES
    __lowerCAmelCase: Any = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase: List[Any] = ['input_ids', 'attention_mask']

    def __init__(
        self,
        _A,
        _A,
        _A="replace",
        _A="<s>",
        _A="</s>",
        _A="</s>",
        _A="<s>",
        _A="<unk>",
        _A="<pad>",
        _A="<mask>",
        _A=False,
        **_A,
    ):
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else bos_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else eos_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else sep_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else cls_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else unk_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE_ = AddedToken(_A, lstrip=_A, rstrip=_A) if isinstance(_A, _A) else mask_token
        super().__init__(
            errors=_A,
            bos_token=_A,
            eos_token=_A,
            unk_token=_A,
            sep_token=_A,
            cls_token=_A,
            pad_token=_A,
            mask_token=_A,
            add_prefix_space=_A,
            **_A,
        )
        with open(_A, encoding='utf-8') as vocab_handle:
            SCREAMING_SNAKE_CASE_ = json.load(_A)
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
        SCREAMING_SNAKE_CASE_ = errors  # how to handle errors in decoding
        SCREAMING_SNAKE_CASE_ = bytes_to_unicode()
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()}
        with open(_A, encoding='utf-8') as merges_handle:
            SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1]
        SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges]
        SCREAMING_SNAKE_CASE_ = dict(zip(_A, range(len(_A))))
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')

    @property
    def lowerCAmelCase__(self):
        return len(self.encoder)

    def lowerCAmelCase__(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def lowerCAmelCase__(self, _A):
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE_ = tuple(_A)
        SCREAMING_SNAKE_CASE_ = get_pairs(_A)
        if not pairs:
            return token
        while True:
            SCREAMING_SNAKE_CASE_ = min(_A, key=lambda _A: self.bpe_ranks.get(_A, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = bigram
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = 0
            while i < len(_A):
                try:
                    SCREAMING_SNAKE_CASE_ = word.index(_A, _A)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    SCREAMING_SNAKE_CASE_ = j
                if word[i] == first and i < len(_A) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            SCREAMING_SNAKE_CASE_ = tuple(_A)
            SCREAMING_SNAKE_CASE_ = new_word
            if len(_A) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE_ = get_pairs(_A)
        SCREAMING_SNAKE_CASE_ = ' '.join(_A)
        SCREAMING_SNAKE_CASE_ = word
        return word

    def lowerCAmelCase__(self, _A):
        SCREAMING_SNAKE_CASE_ = []
        for token in re.findall(self.pat, _A):
            # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            SCREAMING_SNAKE_CASE_ = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' '))
        return bpe_tokens

    def lowerCAmelCase__(self, _A):
        return self.encoder.get(_A, self.encoder.get(self.unk_token))

    def lowerCAmelCase__(self, _A):
        return self.decoder.get(_A)

    def lowerCAmelCase__(self, _A):
        SCREAMING_SNAKE_CASE_ = ''.join(_A)
        SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def lowerCAmelCase__(self, _A, _A=None):
        if not os.path.isdir(_A):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(_A, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=_A, ensure_ascii=_A) + '\n')
        SCREAMING_SNAKE_CASE_ = 0
        with open(_A, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _A: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    SCREAMING_SNAKE_CASE_ = token_index
                writer.write(' '.join(_A) + '\n')
                index += 1
        return vocab_file, merge_file

    def lowerCAmelCase__(self, _A, _A=None):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowerCAmelCase__(self, _A, _A=None, _A=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A, token_ids_a=_A, already_has_special_tokens=_A)
        if token_ids_a is None:
            return [1] + ([0] * len(_A)) + [1]
        return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1]

    def lowerCAmelCase__(self, _A, _A=None):
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def lowerCAmelCase__(self, _A, _A=False, **_A):
        SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()):
            SCREAMING_SNAKE_CASE_ = ' ' + text
        return (text, kwargs)
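The context sample above is recognizably the byte-level BPE tokenizer from transformers (tokenization_bart.py) with obfuscated identifiers. A brief usage sketch of the public class it corresponds to; the exact token strings in the comment are indicative:

from transformers import BartTokenizer

tok = BartTokenizer.from_pretrained("facebook/bart-base")
enc = tok("Hello world!")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# expected along the lines of: ['<s>', 'Hello', 'Ġworld', '!', '</s>']
# (Ġ marks a leading space in the byte-level BPE alphabet built by bytes_to_unicode)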
style_context_codestyle: 620
label: 0
code:

import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Union[str, Any], _SCREAMING_SNAKE_CASE: Optional[Any]=1):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.')[n_shave_prefix_segments:])
    else:
        return ".".join(path.split('.')[:n_shave_prefix_segments])


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: List[str]=0):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = []
    for old_item in old_list:
        SCREAMING_SNAKE_CASE_ = old_item.replace('in_layers.0', 'norm1')
        SCREAMING_SNAKE_CASE_ = new_item.replace('in_layers.2', 'conv1')
        SCREAMING_SNAKE_CASE_ = new_item.replace('out_layers.0', 'norm2')
        SCREAMING_SNAKE_CASE_ = new_item.replace('out_layers.3', 'conv2')
        SCREAMING_SNAKE_CASE_ = new_item.replace('emb_layers.1', 'time_emb_proj')
        SCREAMING_SNAKE_CASE_ = new_item.replace('skip_connection', 'conv_shortcut')
        SCREAMING_SNAKE_CASE_ = shave_segments(_SCREAMING_SNAKE_CASE, n_shave_prefix_segments=_SCREAMING_SNAKE_CASE)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Union[str, Any], _SCREAMING_SNAKE_CASE: Tuple=0):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = []
    for old_item in old_list:
        SCREAMING_SNAKE_CASE_ = old_item
        SCREAMING_SNAKE_CASE_ = new_item.replace('norm.weight', 'group_norm.weight')
        SCREAMING_SNAKE_CASE_ = new_item.replace('norm.bias', 'group_norm.bias')
        SCREAMING_SNAKE_CASE_ = new_item.replace('proj_out.weight', 'proj_attn.weight')
        SCREAMING_SNAKE_CASE_ = new_item.replace('proj_out.bias', 'proj_attn.bias')
        SCREAMING_SNAKE_CASE_ = shave_segments(_SCREAMING_SNAKE_CASE, n_shave_prefix_segments=_SCREAMING_SNAKE_CASE)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: Union[str, Any], _SCREAMING_SNAKE_CASE: int, _SCREAMING_SNAKE_CASE: Union[str, Any]=None, _SCREAMING_SNAKE_CASE: Any=None, _SCREAMING_SNAKE_CASE: Dict=None):
    """simple docstring"""
    assert isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            SCREAMING_SNAKE_CASE_ = old_checkpoint[path]
            SCREAMING_SNAKE_CASE_ = old_tensor.shape[0] // 3
            SCREAMING_SNAKE_CASE_ = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            SCREAMING_SNAKE_CASE_ = old_tensor.shape[0] // config['num_head_channels'] // 3
            SCREAMING_SNAKE_CASE_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = old_tensor.split(channels // num_heads, dim=1)
            SCREAMING_SNAKE_CASE_ = query.reshape(_SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = key.reshape(_SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = value.reshape(_SCREAMING_SNAKE_CASE)
    for path in paths:
        SCREAMING_SNAKE_CASE_ = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        SCREAMING_SNAKE_CASE_ = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        SCREAMING_SNAKE_CASE_ = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        SCREAMING_SNAKE_CASE_ = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                SCREAMING_SNAKE_CASE_ = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            SCREAMING_SNAKE_CASE_ = old_checkpoint[path['old']][:, :, 0]
        else:
            SCREAMING_SNAKE_CASE_ = old_checkpoint[path['old']]


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Optional[Any], _SCREAMING_SNAKE_CASE: int):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = {}
    SCREAMING_SNAKE_CASE_ = checkpoint['time_embed.0.weight']
    SCREAMING_SNAKE_CASE_ = checkpoint['time_embed.0.bias']
    SCREAMING_SNAKE_CASE_ = checkpoint['time_embed.2.weight']
    SCREAMING_SNAKE_CASE_ = checkpoint['time_embed.2.bias']
    SCREAMING_SNAKE_CASE_ = checkpoint['input_blocks.0.0.weight']
    SCREAMING_SNAKE_CASE_ = checkpoint['input_blocks.0.0.bias']
    SCREAMING_SNAKE_CASE_ = checkpoint['out.0.weight']
    SCREAMING_SNAKE_CASE_ = checkpoint['out.0.bias']
    SCREAMING_SNAKE_CASE_ = checkpoint['out.2.weight']
    SCREAMING_SNAKE_CASE_ = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    SCREAMING_SNAKE_CASE_ = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    SCREAMING_SNAKE_CASE_ = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(_SCREAMING_SNAKE_CASE)
    }
    # Retrieves the keys for the middle blocks only
    SCREAMING_SNAKE_CASE_ = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    SCREAMING_SNAKE_CASE_ = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(_SCREAMING_SNAKE_CASE)
    }
    # Retrieves the keys for the output blocks only
    SCREAMING_SNAKE_CASE_ = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    SCREAMING_SNAKE_CASE_ = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(_SCREAMING_SNAKE_CASE)
    }
    for i in range(1, _SCREAMING_SNAKE_CASE):
        SCREAMING_SNAKE_CASE_ = (i - 1) // (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE_ = (i - 1) % (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE_ = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        SCREAMING_SNAKE_CASE_ = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            SCREAMING_SNAKE_CASE_ = checkpoint[f"""input_blocks.{i}.0.op.weight"""]
            SCREAMING_SNAKE_CASE_ = checkpoint[f"""input_blocks.{i}.0.op.bias"""]
            continue
        SCREAMING_SNAKE_CASE_ = renew_resnet_paths(_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        SCREAMING_SNAKE_CASE_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, additional_replacements=[meta_path, resnet_op], config=_SCREAMING_SNAKE_CASE)
        if len(_SCREAMING_SNAKE_CASE):
            SCREAMING_SNAKE_CASE_ = renew_attention_paths(_SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = {
                'old': f"""input_blocks.{i}.1""",
                'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            SCREAMING_SNAKE_CASE_ = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                _SCREAMING_SNAKE_CASE,
                _SCREAMING_SNAKE_CASE,
                _SCREAMING_SNAKE_CASE,
                additional_replacements=[meta_path],
                attention_paths_to_split=_SCREAMING_SNAKE_CASE,
                config=_SCREAMING_SNAKE_CASE,
            )
    SCREAMING_SNAKE_CASE_ = middle_blocks[0]
    SCREAMING_SNAKE_CASE_ = middle_blocks[1]
    SCREAMING_SNAKE_CASE_ = middle_blocks[2]
    SCREAMING_SNAKE_CASE_ = renew_resnet_paths(_SCREAMING_SNAKE_CASE)
    assign_to_checkpoint(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, config=_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = renew_resnet_paths(_SCREAMING_SNAKE_CASE)
    assign_to_checkpoint(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, config=_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = renew_attention_paths(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, attention_paths_to_split=_SCREAMING_SNAKE_CASE, config=_SCREAMING_SNAKE_CASE)
    for i in range(_SCREAMING_SNAKE_CASE):
        SCREAMING_SNAKE_CASE_ = i // (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE_ = i % (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE_ = [shave_segments(_SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]] SCREAMING_SNAKE_CASE_ = {} for layer in output_block_layers: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = layer.split('.' )[0], shave_segments(_SCREAMING_SNAKE_CASE , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = [layer_name] if len(_SCREAMING_SNAKE_CASE ) > 1: SCREAMING_SNAKE_CASE_ = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] SCREAMING_SNAKE_CASE_ = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] SCREAMING_SNAKE_CASE_ = renew_resnet_paths(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = renew_resnet_paths(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) if ["conv.weight", "conv.bias"] in output_block_list.values(): SCREAMING_SNAKE_CASE_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) SCREAMING_SNAKE_CASE_ = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] SCREAMING_SNAKE_CASE_ = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. if len(_SCREAMING_SNAKE_CASE ) == 2: SCREAMING_SNAKE_CASE_ = [] if len(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = renew_attention_paths(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = { 'old': f"""output_blocks.{i}.1""", 'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } SCREAMING_SNAKE_CASE_ = { f"""output_blocks.{i}.1.qkv.bias""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=_SCREAMING_SNAKE_CASE , ) else: SCREAMING_SNAKE_CASE_ = renew_resnet_paths(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 ) for path in resnet_0_paths: SCREAMING_SNAKE_CASE_ = '.'.join(['output_blocks', str(_SCREAMING_SNAKE_CASE ), path['old']] ) SCREAMING_SNAKE_CASE_ = '.'.join(['up_blocks', str(_SCREAMING_SNAKE_CASE ), 'resnets', str(_SCREAMING_SNAKE_CASE ), path['new']] ) SCREAMING_SNAKE_CASE_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase__ : List[Any] = parser.parse_args() UpperCamelCase__ : Tuple = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCamelCase__ : Optional[Any] = json.loads(f.read()) UpperCamelCase__ : List[str] = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCamelCase__ : Tuple = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCamelCase__ : List[str] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase__ : int = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase__ : str = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
720
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
620
0
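For reference, a worked sketch of the path-trimming helper defined at the top of the conversion script above (its call sites name it shave_segments; the behaviour follows directly from the slicing in its body):

# A non-negative count drops that many leading dot-separated segments;
# a negative count drops segments from the end instead.
shave_segments("input_blocks.3.0.in_layers.0.weight", n_shave_prefix_segments=2)
# -> "0.in_layers.0.weight"
shave_segments("input_blocks.3.0.in_layers.0.weight", n_shave_prefix_segments=-1)
# -> "input_blocks.3.0.in_layers.0"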
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCAmelCase__ ( *_A , **_A): pass def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Image ): SCREAMING_SNAKE_CASE_ = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Image ): SCREAMING_SNAKE_CASE_ = np.array(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = npimg.shape return {"hash": hashimage(_SCREAMING_SNAKE_CASE ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Dict = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __lowerCAmelCase : int = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = MaskGenerationPipeline(model=_A , image_processor=_A) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCAmelCase__ ( self , _A , _A): pass @require_tf @unittest.skip('Image segmentation not implemented in TF') def lowerCAmelCase__ ( self): pass @slow @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = pipeline('mask-generation' , model='facebook/sam-vit-huge') SCREAMING_SNAKE_CASE_ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256) # Shortening by hashing SCREAMING_SNAKE_CASE_ = [] for i, o in enumerate(outputs['masks']): new_outupt += [{"mask": mask_to_test_readable(_A), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_A , decimals=4) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 
640)}, 'scores': 0.9_4_6_4}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'facebook/sam-vit-huge' SCREAMING_SNAKE_CASE_ = pipeline('mask-generation' , model=_A) SCREAMING_SNAKE_CASE_ = image_segmenter( 'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256) # Shortening by hashing SCREAMING_SNAKE_CASE_ = [] for i, o in enumerate(outputs['masks']): new_outupt += [{"mask": mask_to_test_readable(_A), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_A , decimals=4) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3}, ] , )
721
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all Hugging Face cache locations into a per-session temporary directory
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
0
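The image-hashing helper in the mask-generation test above calls hashlib.mda, which does not exist; a minimal self-contained sketch of the presumed intent follows, assuming the digest is MD5 (consistent with the 10-character hex hashes in the expected outputs):

import hashlib

import numpy as np
from PIL import Image


def hashimage(image: Image.Image) -> str:
    # Short, deterministic fingerprint of the raw pixel bytes.
    return hashlib.md5(image.tobytes()).hexdigest()[:10]


def mask_to_test_readable(mask: Image.Image) -> dict:
    # Summarize a predicted mask as its hash plus the array shape.
    npimg = np.array(mask)
    return {"hash": hashimage(mask), "shape": npimg.shape}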
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) UpperCamelCase__ : str = _symbol_database.Default() UpperCamelCase__ : int = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) UpperCamelCase__ : Optional[int] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Union[str, Any] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" UpperCamelCase__ : Optional[int] = 45 UpperCamelCase__ : Union[str, Any] = 1_581 UpperCamelCase__ : Union[str, Any] = 1_517 UpperCamelCase__ : Dict = 1_570 UpperCamelCase__ : int = 1_584 UpperCamelCase__ : Any = 1_793 UpperCamelCase__ : Optional[int] = 1_795 UpperCamelCase__ : Union[str, Any] = 1_916 UpperCamelCase__ : Any = 1_864 UpperCamelCase__ : Optional[int] = 1_905 UpperCamelCase__ : Tuple = 1_919 UpperCamelCase__ : List[Any] = 2_429 UpperCamelCase__ : Optional[int] = 2_208 UpperCamelCase__ : Optional[Any] = 2_418 UpperCamelCase__ : int = 2_323 UpperCamelCase__ : List[Any] = 2_407 # @@protoc_insertion_point(module_scope)
700
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Distribute `num_shards` contiguous shards over at most `max_num_jobs` groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # Earlier groups receive one extra shard each until the remainder is used up
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs, one per job."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of gen_kwargs back into a single gen_kwargs dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size share one shuffling."""
    # First, generate the shuffled indices per list size
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
620
0
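A worked example of the shard-distribution helper above: the remainder shards go to the earliest groups, and empty trailing groups are dropped.

_distribute_shards(num_shards=5, max_num_jobs=2)
# -> [range(0, 3), range(3, 5)]
_distribute_shards(num_shards=2, max_num_jobs=4)
# -> [range(0, 1), range(1, 2)]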
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json configuration
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
620
0
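A hypothetical invocation of the RemBERT conversion function above; all paths are placeholders.

convert_rembert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./rembert/model.ckpt",        # placeholder path
    rembert_config_file="./rembert/config.json",      # placeholder path
    pytorch_dump_path="./rembert/pytorch_model.bin",  # placeholder path
)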
g = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Calculate the buoyant force on an object fully or partially submerged in a fluid."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
702
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __snake_case ( lowerCAmelCase__ ): def __init__( self , _A , _A , _A , _A = None , ): super().__init__() self.register_modules(transformer=_A , vae=_A , scheduler=_A) # create a imagenet -> id dictionary for easier use SCREAMING_SNAKE_CASE_ = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(','): SCREAMING_SNAKE_CASE_ = int(_A) SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items())) def lowerCAmelCase__ ( self , _A): if not isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = list(_A) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""") return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ): SCREAMING_SNAKE_CASE_ = len(_A) SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels SCREAMING_SNAKE_CASE_ = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , ) SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1) SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device) SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_A) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2] SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0) SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A) SCREAMING_SNAKE_CASE_ = t if not torch.is_tensor(_A): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps' if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa else: SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device) elif len(timesteps.shape) == 0: SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output SCREAMING_SNAKE_CASE_ = self.transformer( _A , timestep=_A , class_labels=_A).sample # perform guidance if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0) SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps) SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0) SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1) else: SCREAMING_SNAKE_CASE_ = noise_pred # compute previous image: x_t -> x_t-1 SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0) else: SCREAMING_SNAKE_CASE_ = latent_model_input SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A) if not return_dict: return (samples,) return ImagePipelineOutput(images=_A)
620
0
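A quick sanity check of the buoyancy helper above: half a cubic metre of fresh water (density 1000 kg/m^3) under standard gravity.

archimedes_principle(fluid_density=1000, volume=0.5)
# -> 1000 * 9.80665 * 0.5 = 4903.325 (newtons)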
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : int = logging.get_logger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if "resnet-50" in model_name: SCREAMING_SNAKE_CASE_ = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: SCREAMING_SNAKE_CASE_ = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) SCREAMING_SNAKE_CASE_ = DetrConfig(use_timm_backbone=_SCREAMING_SNAKE_CASE , backbone_config=_SCREAMING_SNAKE_CASE ) # set label attributes SCREAMING_SNAKE_CASE_ = 'panoptic' in model_name if is_panoptic: SCREAMING_SNAKE_CASE_ = 250 else: SCREAMING_SNAKE_CASE_ = 91 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'coco-detection-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( 
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") ) 
rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ = '' if is_panoptic: SCREAMING_SNAKE_CASE_ = 'detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[:256] SCREAMING_SNAKE_CASE_ = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[256:512] SCREAMING_SNAKE_CASE_ = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[:256] SCREAMING_SNAKE_CASE_ = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[256:512] SCREAMING_SNAKE_CASE_ = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention SCREAMING_SNAKE_CASE_ = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) SCREAMING_SNAKE_CASE_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[:256, :] SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[:256] SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[256:512, :] SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[256:512] SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[-256:, :] SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[-256:] def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Any=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_detr_config(_SCREAMING_SNAKE_CASE ) # load original model from torch hub SCREAMING_SNAKE_CASE_ = { 'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101', } logger.info(f"""Converting model {model_name}...""" ) SCREAMING_SNAKE_CASE_ = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(_SCREAMING_SNAKE_CASE ): if is_panoptic: SCREAMING_SNAKE_CASE_ = 'detr.' 
+ src rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(_SCREAMING_SNAKE_CASE , is_panoptic=_SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE_ = 'detr.model.' if is_panoptic else 'model.' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE_ = DetrForSegmentation(_SCREAMING_SNAKE_CASE ) if is_panoptic else DetrForObjectDetection(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() # verify our conversion on an image SCREAMING_SNAKE_CASE_ = 'coco_panoptic' if is_panoptic else 'coco_detection' SCREAMING_SNAKE_CASE_ = DetrImageProcessor(format=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = processor(images=prepare_img() , return_tensors='pt' ) SCREAMING_SNAKE_CASE_ = encoding['pixel_values'] SCREAMING_SNAKE_CASE_ = detr(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(f"""nielsr/{model_name}""" ) processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": UpperCamelCase__ : Dict = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") UpperCamelCase__ : str = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
703
import pickle import numpy as np from matplotlib import pyplot as plt class __snake_case : def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2): SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = conva_get[:2] SCREAMING_SNAKE_CASE_ = conva_get[2] SCREAMING_SNAKE_CASE_ = size_pa SCREAMING_SNAKE_CASE_ = rate_w SCREAMING_SNAKE_CASE_ = rate_t SCREAMING_SNAKE_CASE_ = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 def lowerCAmelCase__ ( self , _A): # save model dict with pickle SCREAMING_SNAKE_CASE_ = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(_A , 'wb') as f: pickle.dump(_A , _A) print(f"""Model saved: {save_path}""") @classmethod def lowerCAmelCase__ ( cls , _A): # read saved model with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = pickle.load(_A) # noqa: S301 SCREAMING_SNAKE_CASE_ = model_dic.get('conv1') conv_get.append(model_dic.get('step_conv1')) SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre') # create model instance SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A) # modify model parameter SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('wkj') SCREAMING_SNAKE_CASE_ = model_dic.get('vji') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3') return conv_ins def lowerCAmelCase__ ( self , _A): return 1 / (1 + np.exp(-1 * x)) def lowerCAmelCase__ ( self , _A): return round(_A , 3) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): # convolution process SCREAMING_SNAKE_CASE_ = convs[0] SCREAMING_SNAKE_CASE_ = convs[1] SCREAMING_SNAKE_CASE_ = np.shape(_A)[0] # get the data slice of original image data, data_focus SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , size_data - size_conv + 1 , _A): for j_focus in range(0 , size_data - size_conv + 1 , _A): SCREAMING_SNAKE_CASE_ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_A) # calculate the feature map of every single kernel, and saved as list of matrix SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1) for i_map in range(_A): SCREAMING_SNAKE_CASE_ = [] for i_focus in range(len(_A)): SCREAMING_SNAKE_CASE_ = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape( _A , _A) data_featuremap.append(_A) # expanding the data slice to One 
dimension
        SCREAMING_SNAKE_CASE_ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(_A))
        SCREAMING_SNAKE_CASE_ = np.asarray(_A)
        return focus_list, data_featuremap

    def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"):
        # pooling process
        SCREAMING_SNAKE_CASE_ = len(featuremaps[0])
        SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling)
        SCREAMING_SNAKE_CASE_ = []
        for i_map in range(len(_A)):
            SCREAMING_SNAKE_CASE_ = featuremaps[i_map]
            SCREAMING_SNAKE_CASE_ = []
            for i_focus in range(0 , _A , _A):
                for j_focus in range(0 , _A , _A):
                    SCREAMING_SNAKE_CASE_ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(_A))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(_A))
            SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A)
            featuremap_pooled.append(_A)
        return featuremap_pooled

    def lowerCAmelCase__ ( self , _A):
        # expanding three dimension data to one dimension list
        SCREAMING_SNAKE_CASE_ = []
        for i in range(len(_A)):
            SCREAMING_SNAKE_CASE_ = np.shape(data[i])
            SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1])
            SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0]
            data_expanded.extend(_A)
        SCREAMING_SNAKE_CASE_ = np.asarray(_A)
        return data_expanded

    def lowerCAmelCase__ ( self , _A):
        # expanding matrix to one dimension list
        SCREAMING_SNAKE_CASE_ = np.asarray(_A)
        SCREAMING_SNAKE_CASE_ = np.shape(_A)
        SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1])
        return data_expanded

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = 0
        for i_map in range(_A):
            SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map))
            for i in range(0 , _A , _A):
                for j in range(0 , _A , _A):
                    SCREAMING_SNAKE_CASE_ = pd_pool[
                        i_pool
                    ]
                    SCREAMING_SNAKE_CASE_ = i_pool + 1
            SCREAMING_SNAKE_CASE_ = np.multiply(
                _A , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
            pd_all.append(_A)
        return pd_all

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool):
        # model training
        print('----------------------Start Training-------------------------')
        print((' - - Shape: Train_Data ', np.shape(_A)))
        print((' - - Shape: Teach_Data ', np.shape(_A)))
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = 10000
        while rp < n_repeat and mse >= error_accuracy:
            SCREAMING_SNAKE_CASE_ = 0
            print(f"""-------------Learning Time {rp}--------------""")
            for p in range(len(_A)):
                # print('------------Learning Image: %d--------------'%p)
                SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p])
                SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p])
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
                    _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
                SCREAMING_SNAKE_CASE_ = np.shape(_A)
                SCREAMING_SNAKE_CASE_ = self._expand(_A)
                SCREAMING_SNAKE_CASE_ = data_bp_input
                SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa
                SCREAMING_SNAKE_CASE_ = self.sig(_A)
                SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa
                SCREAMING_SNAKE_CASE_ = self.sig(_A)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                SCREAMING_SNAKE_CASE_ = np.multiply(
                    (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa)))
                SCREAMING_SNAKE_CASE_ = np.multiply(
                    np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa)))
                SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji)
                SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist()
                SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool(
                    _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv])
                    SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A)
                    SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]))
                    SCREAMING_SNAKE_CASE_ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_k_all * self.rate_thre
                SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            SCREAMING_SNAKE_CASE_ = rp + 1
            SCREAMING_SNAKE_CASE_ = error_count / patterns
            all_mse.append(_A)

        def draw_error():
            SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(_A , '+-')
            plt.plot(_A , 'r--')
            plt.xlabel('Learning Times')
            plt.ylabel('All_mse')
            plt.grid(_A , alpha=0.5)
            plt.show()

        print('------------------Training Complete---------------------')
        print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
        if draw_e:
            draw_error()
        return mse

    def lowerCAmelCase__ ( self , _A):
        # model predict
        SCREAMING_SNAKE_CASE_ = []
        print('-------------------Start Testing-------------------------')
        print((' - - Shape: Test_Data ', np.shape(_A)))
        for p in range(len(_A)):
            SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p])
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
                _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
            SCREAMING_SNAKE_CASE_ = self._expand(_A)
            SCREAMING_SNAKE_CASE_ = data_bp_input
            SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa
            SCREAMING_SNAKE_CASE_ = self.sig(_A)
            SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa
            SCREAMING_SNAKE_CASE_ = self.sig(_A)
            produce_out.extend(bp_outa.getA().tolist())
        SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out]
        return np.asarray(_A)

    def lowerCAmelCase__ ( self , _A):
        # return the data of image after convolution process so we can check it out
        SCREAMING_SNAKE_CASE_ = np.asmatrix(_A)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
            _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
620
0
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any]="attention" ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    SCREAMING_SNAKE_CASE_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    SCREAMING_SNAKE_CASE_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    SCREAMING_SNAKE_CASE_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    SCREAMING_SNAKE_CASE_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=False ):
    """simple docstring"""
    if split_mlp_wi:
        SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        SCREAMING_SNAKE_CASE_ = (wi_a, wi_a)
    else:
        SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]

    SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
    """simple docstring"""
    return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , *, _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool , _SCREAMING_SNAKE_CASE : bool = False ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = traverse_util.flatten_dict(variables['target'] )
    SCREAMING_SNAKE_CASE_ = {'/'.join(_SCREAMING_SNAKE_CASE ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    SCREAMING_SNAKE_CASE_ = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:' , _SCREAMING_SNAKE_CASE )

    SCREAMING_SNAKE_CASE_ = collections.OrderedDict()

    # Shared embeddings.
    SCREAMING_SNAKE_CASE_ = old['token_embedder/embedding']

    # Encoder.
    for i in range(_SCREAMING_SNAKE_CASE ):
        # Block i, layer 0 (Self Attention).
        SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'pre_attention_layer_norm' )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'attention' )
        SCREAMING_SNAKE_CASE_ = layer_norm
        SCREAMING_SNAKE_CASE_ = k.T
        SCREAMING_SNAKE_CASE_ = o.T
        SCREAMING_SNAKE_CASE_ = q.T
        SCREAMING_SNAKE_CASE_ = v.T

        # Block i, layer 1 (MLP).
        SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'pre_mlp_layer_norm' )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , _SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = layer_norm
        if split_mlp_wi:
            SCREAMING_SNAKE_CASE_ = wi[0].T
            SCREAMING_SNAKE_CASE_ = wi[1].T
        else:
            SCREAMING_SNAKE_CASE_ = wi.T
        SCREAMING_SNAKE_CASE_ = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' ).T

    SCREAMING_SNAKE_CASE_ = old['encoder/encoder_norm/scale']

    if not scalable_attention:
        SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup(
            _SCREAMING_SNAKE_CASE , 0 , 'encoder' ).T
        SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup(
            _SCREAMING_SNAKE_CASE , 0 , 'decoder' ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(_SCREAMING_SNAKE_CASE ):
            # Block i, layer 0 (Self Attention).
            SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_self_attention_layer_norm' )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'self_attention' )
            SCREAMING_SNAKE_CASE_ = layer_norm
            SCREAMING_SNAKE_CASE_ = k.T
            SCREAMING_SNAKE_CASE_ = o.T
            SCREAMING_SNAKE_CASE_ = q.T
            SCREAMING_SNAKE_CASE_ = v.T

            # Block i, layer 1 (Cross Attention).
            SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_cross_attention_layer_norm' )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'encoder_decoder_attention' )
            SCREAMING_SNAKE_CASE_ = layer_norm
            SCREAMING_SNAKE_CASE_ = k.T
            SCREAMING_SNAKE_CASE_ = o.T
            SCREAMING_SNAKE_CASE_ = q.T
            SCREAMING_SNAKE_CASE_ = v.T

            # Block i, layer 2 (MLP).
            SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_mlp_layer_norm' )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , _SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ = layer_norm
            if split_mlp_wi:
                SCREAMING_SNAKE_CASE_ = wi[0].T
                SCREAMING_SNAKE_CASE_ = wi[1].T
            else:
                SCREAMING_SNAKE_CASE_ = wi.T
            SCREAMING_SNAKE_CASE_ = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' ).T

        SCREAMING_SNAKE_CASE_ = old['decoder/decoder_norm/scale']

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            SCREAMING_SNAKE_CASE_ = old['decoder/logits_dense/kernel'].T

    return new


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        SCREAMING_SNAKE_CASE_ = state_dict['shared.weight']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            SCREAMING_SNAKE_CASE_ = state_dict['shared.weight']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            SCREAMING_SNAKE_CASE_ = state_dict['shared.weight']

    return state_dict


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = checkpoints.load_tax_checkpoint(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = convert_tax_to_pytorch(
        _SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=_SCREAMING_SNAKE_CASE , scalable_attention=_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = make_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = MTaConfig.from_json_file(_SCREAMING_SNAKE_CASE )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        SCREAMING_SNAKE_CASE_ = UMTaEncoderModel(_SCREAMING_SNAKE_CASE )
    else:
        SCREAMING_SNAKE_CASE_ = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE )

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )

    # Verify that we can load the checkpoint.
    model.from_pretrained(_SCREAMING_SNAKE_CASE )
    print('Done' )


if __name__ == "__main__":
    UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    UpperCamelCase__ : Tuple = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
704
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int=7 ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = None
    if token is not None:
        SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    SCREAMING_SNAKE_CASE_ = '636036'

    SCREAMING_SNAKE_CASE_ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    SCREAMING_SNAKE_CASE_ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()

    return result["workflow_runs"]


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            SCREAMING_SNAKE_CASE_ = workflow_run['id']
            break

    return workflow_run_id


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    if workflow_run_id is not None:
        SCREAMING_SNAKE_CASE_ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                SCREAMING_SNAKE_CASE_ = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
    """simple docstring"""
    get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    SCREAMING_SNAKE_CASE_ = {}
    for artifact_name in artifact_names:
        SCREAMING_SNAKE_CASE_ = os.path.join(_SCREAMING_SNAKE_CASE , f"""{artifact_name}.zip""" )
        if os.path.isfile(_SCREAMING_SNAKE_CASE ):
            SCREAMING_SNAKE_CASE_ = {}
            with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
                        # read the file
                        with z.open(_SCREAMING_SNAKE_CASE ) as f:
                            SCREAMING_SNAKE_CASE_ = f.read().decode('UTF-8' )

    return results
620
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
705
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


UpperCamelCase__ : Any = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : str = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class __snake_case ( unittest.TestCase ):
    __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
    __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCAmelCase : Tuple = ['accelerate', 'launch']
    __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCAmelCase : List[str] = 'default_config.yaml'
    __lowerCAmelCase : List[Any] = config_folder / config_file
    __lowerCAmelCase : str = config_folder / '_default_config.yaml'
    __lowerCAmelCase : Optional[int] = Path('tests/test_configs' )

    @classmethod
    def lowerCAmelCase__ ( cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def lowerCAmelCase__ ( cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=_A):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())


class __snake_case ( unittest.TestCase ):
    __lowerCAmelCase : Optional[Any] = 'test-tpu'
    __lowerCAmelCase : str = 'us-central1-a'
    __lowerCAmelCase : Union[str, Any] = 'ls'
    __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
    __lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
    __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
    __lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
706
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class __snake_case ( unittest.TestCase ):
    __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
    __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCAmelCase : Tuple = ['accelerate', 'launch']
    __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCAmelCase : List[str] = 'default_config.yaml'
    __lowerCAmelCase : List[Any] = config_folder / config_file
    __lowerCAmelCase : str = config_folder / '_default_config.yaml'
    __lowerCAmelCase : Optional[int] = Path('tests/test_configs' )

    @classmethod
    def lowerCAmelCase__ ( cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def lowerCAmelCase__ ( cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=_A):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())


class __snake_case ( unittest.TestCase ):
    __lowerCAmelCase : Optional[Any] = 'test-tpu'
    __lowerCAmelCase : str = 'us-central1-a'
    __lowerCAmelCase : Union[str, Any] = 'ls'
    __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
    __lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
    __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
    __lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
620
0
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = SamImageProcessor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(_A)
        processor.save_pretrained(self.tmpdirname)

    def lowerCAmelCase__ ( self , **_A):
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A).image_processor

    def lowerCAmelCase__ ( self):
        shutil.rmtree(self.tmpdirname)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
        SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(_A , 0 , -1)) for x in image_inputs]
        return image_inputs

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=_A , padding_value=1.0)
        SCREAMING_SNAKE_CASE_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_A , padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , _A)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=_A)
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='np')
        SCREAMING_SNAKE_CASE_ = processor(images=_A , return_tensors='np')
        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)

    @require_torch
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=_A)
        SCREAMING_SNAKE_CASE_ = [torch.ones((1, 3, 5, 5))]
        SCREAMING_SNAKE_CASE_ = [[1764, 2646]]
        SCREAMING_SNAKE_CASE_ = [[683, 1024]]
        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(_A , _A , _A)
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646))

        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(
            _A , torch.tensor(_A) , torch.tensor(_A))
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646))

        # should also work with np
        SCREAMING_SNAKE_CASE_ = [np.ones((1, 3, 5, 5))]
        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(_A , np.array(_A) , np.array(_A))
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646))

        SCREAMING_SNAKE_CASE_ = [[1, 0], [0, 1]]
        with self.assertRaises(_A):
            SCREAMING_SNAKE_CASE_ = processor.post_process_masks(_A , np.array(_A) , np.array(_A))


@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = SamImageProcessor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(_A)
        processor.save_pretrained(self.tmpdirname)

    def lowerCAmelCase__ ( self , **_A):
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A).image_processor

    def lowerCAmelCase__ ( self):
        shutil.rmtree(self.tmpdirname)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
        SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(_A , 0 , -1)) for x in image_inputs]
        return image_inputs

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=_A , padding_value=1.0)
        SCREAMING_SNAKE_CASE_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_A , padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , _A)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=_A)
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='np')
        SCREAMING_SNAKE_CASE_ = processor(images=_A , return_tensors='np')
        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)

    @require_tf
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=_A)
        SCREAMING_SNAKE_CASE_ = [tf.ones((1, 3, 5, 5))]
        SCREAMING_SNAKE_CASE_ = [[1764, 2646]]
        SCREAMING_SNAKE_CASE_ = [[683, 1024]]
        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(_A , _A , _A , return_tensors='tf')
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646))

        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(
            _A , tf.convert_to_tensor(_A) , tf.convert_to_tensor(_A) , return_tensors='tf' , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646))

        # should also work with np
        SCREAMING_SNAKE_CASE_ = [np.ones((1, 3, 5, 5))]
        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(
            _A , np.array(_A) , np.array(_A) , return_tensors='tf')
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646))

        SCREAMING_SNAKE_CASE_ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            SCREAMING_SNAKE_CASE_ = processor.post_process_masks(
                _A , np.array(_A) , np.array(_A) , return_tensors='tf')


@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = SamImageProcessor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(_A)
        processor.save_pretrained(self.tmpdirname)

    def lowerCAmelCase__ ( self , **_A):
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A).image_processor

    def lowerCAmelCase__ ( self):
        shutil.rmtree(self.tmpdirname)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
        SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(_A , 0 , -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=_A)
        SCREAMING_SNAKE_CASE_ = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
        SCREAMING_SNAKE_CASE_ = [tf.convert_to_tensor(_A)]
        SCREAMING_SNAKE_CASE_ = [torch.tensor(_A)]
        SCREAMING_SNAKE_CASE_ = [[1764, 2646]]
        SCREAMING_SNAKE_CASE_ = [[683, 1024]]
        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(
            _A , _A , _A , return_tensors='tf')
        SCREAMING_SNAKE_CASE_ = processor.post_process_masks(
            _A , _A , _A , return_tensors='pt')
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = SamProcessor(image_processor=_A)
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='pt')['pixel_values'].numpy()
        SCREAMING_SNAKE_CASE_ = processor(images=_A , return_tensors='pt')['pixel_values'].numpy()
        SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='tf')['pixel_values'].numpy()
        SCREAMING_SNAKE_CASE_ = processor(images=_A , return_tensors='tf')['pixel_values'].numpy()
        self.assertTrue(np.allclose(_A , _A))
        self.assertTrue(np.allclose(_A , _A))
        self.assertTrue(np.allclose(_A , _A))
707
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


UpperCamelCase__ : Tuple = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Tuple = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
0
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class __snake_case ( lowerCAmelCase__ ):
    __lowerCAmelCase : List[Any] = CustomTokenizer
    pass
708
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase__ : int = Lock()


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ):
    """simple docstring"""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(_SCREAMING_SNAKE_CASE )
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            SCREAMING_SNAKE_CASE_ = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            SCREAMING_SNAKE_CASE_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(_SCREAMING_SNAKE_CASE )
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            SCREAMING_SNAKE_CASE_ = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            SCREAMING_SNAKE_CASE_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(_SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    SCREAMING_SNAKE_CASE_ = Pipe()
    SCREAMING_SNAKE_CASE_ = Pipe()
    process_array_.append(
        Process(
            target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    SCREAMING_SNAKE_CASE_ = temp_rs
    SCREAMING_SNAKE_CASE_ = temp_rr

    for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
        SCREAMING_SNAKE_CASE_ = Pipe()
        SCREAMING_SNAKE_CASE_ = Pipe()
        process_array_.append(
            Process(
                target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        SCREAMING_SNAKE_CASE_ = temp_rs
        SCREAMING_SNAKE_CASE_ = temp_rr

    process_array_.append(
        Process(
            target=_SCREAMING_SNAKE_CASE , args=(
                len(_SCREAMING_SNAKE_CASE ) - 1,
                arr[len(_SCREAMING_SNAKE_CASE ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1],
            ) , ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ):
        SCREAMING_SNAKE_CASE_ = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def _UpperCAmelCase ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = odd_even_transposition(_SCREAMING_SNAKE_CASE )
    print('Sorted List\n' )
    print(*_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    main()
620
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __snake_case ( lowerCAmelCase__ ):
    __lowerCAmelCase : Optional[Any] = ['image_processor', 'tokenizer']
    __lowerCAmelCase : List[str] = 'AutoImageProcessor'
    __lowerCAmelCase : List[Any] = 'AutoTokenizer'

    def __init__( self , _A , _A):
        super().__init__(_A , _A)
        SCREAMING_SNAKE_CASE_ = self.image_processor

    def __call__( self , _A=None , _A=None , _A=None , **_A):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , return_tensors=_A , **_A)

        if images is not None:
            SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=_A , **_A)

        if text is not None and images is not None:
            SCREAMING_SNAKE_CASE_ = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_A) , tensor_type=_A)

    def lowerCAmelCase__ ( self , *_A , **_A):
        return self.tokenizer.batch_decode(*_A , **_A)

    def lowerCAmelCase__ ( self , *_A , **_A):
        return self.tokenizer.decode(*_A , **_A)

    @property
    def lowerCAmelCase__ ( self):
        return ["input_ids", "attention_mask", "pixel_values"]
709
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class __snake_case ( unittest.TestCase , lowerCAmelCase__ ):
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering')
        self.tool.setup()
        SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering' , remote=_A)

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.tool(_A , 'What did Hugging Face do in April 2021?')
        self.assertEqual(_A , 'launched the BigScience Research Workshop')

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.remote_tool(_A , 'What did Hugging Face do in April 2021?')
        self.assertEqual(_A , 'launched the BigScience Research Workshop')

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.tool(text=_A , question='What did Hugging Face do in April 2021?')
        self.assertEqual(_A , 'launched the BigScience Research Workshop')

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?')
        self.assertEqual(_A , 'launched the BigScience Research Workshop')
620
0
'''simple docstring'''
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class __snake_case :
        @staticmethod
        def lowerCAmelCase__ ( *_A , **_A):
            pass


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Image ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = hashlib.mda(image.tobytes() )
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
    __lowerCAmelCase : str = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def lowerCAmelCase__ ( self , _A , _A , _A):
        SCREAMING_SNAKE_CASE_ = DepthEstimationPipeline(model=_A , image_processor=_A)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def lowerCAmelCase__ ( self , _A , _A):
        SCREAMING_SNAKE_CASE_ = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
        self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)} , _A)
        import datasets

        SCREAMING_SNAKE_CASE_ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test')
        SCREAMING_SNAKE_CASE_ = depth_estimator(
            [
                Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                # RGBA
                dataset[0]['file'],
                # LA
                dataset[1]['file'],
                # L
                dataset[2]['file'],
            ])
        self.assertEqual(
            [
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
            ] , _A , )

    @require_tf
    @unittest.skip('Depth estimation is not implemented in TF')
    def lowerCAmelCase__ ( self):
        pass

    @slow
    @require_torch
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = 'Intel/dpt-large'
        SCREAMING_SNAKE_CASE_ = pipeline('depth-estimation' , model=_A)
        SCREAMING_SNAKE_CASE_ = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
        SCREAMING_SNAKE_CASE_ = hashimage(outputs['depth'])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item()) , 29.304)
        self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item()) , 2.6_6_2)

    @require_torch
    def lowerCAmelCase__ ( self):
        # This is highly irregular to have no small tests.
        self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT')
710
import unittest

import numpy as np

from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class __snake_case ( unittest.TestCase ):
    def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ):
        SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20}
        SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = image_size
        SCREAMING_SNAKE_CASE_ = min_resolution
        SCREAMING_SNAKE_CASE_ = max_resolution
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = do_center_crop
        SCREAMING_SNAKE_CASE_ = crop_size
        SCREAMING_SNAKE_CASE_ = do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean
        SCREAMING_SNAKE_CASE_ = image_std
        SCREAMING_SNAKE_CASE_ = do_reduce_labels

    def lowerCAmelCase__ ( self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def _UpperCAmelCase ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] )
    SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] )
    return image, map


def _UpperCAmelCase ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] )
    SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] )
    SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] )
    SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] )
    return [imagea, imagea], [mapa, mapa]


@require_torch
@require_vision
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
    __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self)

    @property
    def lowerCAmelCase__ ( self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(_A , 'do_resize'))
        self.assertTrue(hasattr(_A , 'size'))
        self.assertTrue(hasattr(_A , 'do_center_crop'))
        self.assertTrue(hasattr(_A , 'center_crop'))
        self.assertTrue(hasattr(_A , 'do_normalize'))
        self.assertTrue(hasattr(_A , 'image_mean'))
        self.assertTrue(hasattr(_A , 'image_std'))

    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels , _A)

        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A)
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels , _A)

    def lowerCAmelCase__ ( self):
        pass

    def lowerCAmelCase__ ( self):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A)
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image)

        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def lowerCAmelCase__ ( self):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A)
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray)

        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def lowerCAmelCase__ ( self):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor)

        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def lowerCAmelCase__ ( self):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
        SCREAMING_SNAKE_CASE_ = []
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test not batched input (PIL images)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
        SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched input (PIL images)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs()
        SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                2,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

    def lowerCAmelCase__ ( self):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
        SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
620
0
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor( [ [ 8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0 -0.5_6_2_0_0_4_4, 5.2_3_2_2_9_7_5_2, 4.0_3_8_6_3_9_3, -6.8_7_9_8_3_7_8, -0.5_4_7_8_5_8_0_2, -3.2_0_1_2_1_5_3, 2.9_2_7_7_7_1_7_6, 1.8_8_1_7_1_9_5_3, 7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9 8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10 -9.8_5_7_1_1_8_3_6, -5.9_6_2_0_9_2_3_6, -1.1_3_0_3_9_1_6_1, -7.1_1_1_5_2_9_4, -0.8_3_6_9_6_3_3, -5.3_1_8_6_4_0_8, 7.0_6_4_2_7_4_0_7, 0.8_1_3_6_9_3_4_4, -0.8_2_0_2_3_8_1_7, -5.9_1_7_9_7_9_6, 0.5_8_8_1_3_4_4_3, -6.9_9_7_7_8_4_3_8, 4.7_1_5_5_1_1_8_9, -0.1_8_7_7_1_6_3_7, 7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25 9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26 2.1_2_6_6_2_9_4_1, -9.3_2_5_6_2_0_3_8, 2.3_5_6_5_2_5_2_2, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5_8_4_2_5_5_1_8, 4.5_3_1_3_9_2_3_8, -5.5_7_5_1_0_4_6_4, -6.2_8_0_3_0_6_9_9, -7.1_9_5_2_9_5_0_3, -4.0_2_1_2_2_5_5_1, 1.3_9_3_3_7_0_3_7, -6.0_6_7_0_7_0_5_7, 1.5_9_4_8_0_5_1_7, -9.6_4_3_1_1_9, 0.0_3_9_0_7_7_9_9, 0.6_7_2_3_1_7_6_2, -8.8_8_2_0_6_7_2_6, 6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13 2.2_8_5_2_0_7_2_3, 4.8_2_7_6_7_5_0_6, 4.3_0_4_2_1_3_6_8, 8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17 5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18 -4.4_7_3_5_7_9_4, 7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20 -2.9_1_0_5_1_6_6_3, 2.6_1_9_4_6_0_7_7, -2.5_6_7_4_7_6_2, -9.4_8_9_5_9_3_0_2, -4.0_2_9_2_2_6_4_5, -1.3_5_4_1_6_9_1_8, 9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 
27 -5.8_9_4_7_8_5_5_3, 1.8_5_3_7_0_4_6_7, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor( [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above SCREAMING_SNAKE_CASE_ = tf_top_k_top_p_filtering(_A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4) SCREAMING_SNAKE_CASE_ = output[output != -float('inf')] SCREAMING_SNAKE_CASE_ = tf.cast( tf.where(tf.not_equal(_A , tf.constant(-float('inf') , dtype=tf.floataa))) , dtype=tf.intaa , ) tf.debugging.assert_near(_A , _A , rtol=1E-12) tf.debugging.assert_equal(_A , _A) @require_tf class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): __lowerCAmelCase : List[str] = { 'AutoModelForCausalLM': TFAutoModelForCausalLM, 'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq, 'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM, 'AutoModelForVision2Seq': TFAutoModelForVisionaSeq, 'LogitsProcessorList': TFLogitsProcessorList, 'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor, 'create_tensor_fn': tf.convert_to_tensor, 'floats_tensor': floats_tensor, 'return_tensors': 'tf', } @slow def lowerCAmelCase__ ( self): # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 2 class __snake_case ( tf.Module ): def __init__( self , _A): super(_A , self).__init__() SCREAMING_SNAKE_CASE_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids'), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask'), ) , jit_compile=_A , ) def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = self.model.generate( input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE_ = [[2, 0], [102, 103]] SCREAMING_SNAKE_CASE_ = [[1, 0], [1, 1]] SCREAMING_SNAKE_CASE_ = DummyModel(model=_A) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_A , _A , signatures={'serving_default': dummy_model.serving}) SCREAMING_SNAKE_CASE_ = tf.saved_model.load(_A).signatures['serving_default'] for batch_size in range(1 , len(_A) + 1): SCREAMING_SNAKE_CASE_ = { 'input_ids': tf.constant(dummy_input_ids[:batch_size]), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size]), } SCREAMING_SNAKE_CASE_ = serving_func(**_A)['sequences'] SCREAMING_SNAKE_CASE_ = test_model.generate(**_A , max_new_tokens=_A) tf.debugging.assert_equal(_A , _A) @slow def lowerCAmelCase__ ( self): # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 class __snake_case ( tf.Module ): def __init__( self , _A): super(_A , self).__init__() SCREAMING_SNAKE_CASE_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids'), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask'), ) , 
jit_compile=_A , ) def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = self.model.generate( input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE_ = [[2], [102, 103]] SCREAMING_SNAKE_CASE_ = [[1], [1, 1]] SCREAMING_SNAKE_CASE_ = DummyModel(model=_A) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_A , _A , signatures={'serving_default': dummy_model.serving}) SCREAMING_SNAKE_CASE_ = tf.saved_model.load(_A).signatures['serving_default'] for input_row in range(len(_A)): SCREAMING_SNAKE_CASE_ = { 'input_ids': tf.constant([dummy_input_ids[input_row]]), 'attention_mask': tf.constant([dummy_attention_masks[input_row]]), } SCREAMING_SNAKE_CASE_ = serving_func(**_A)['sequences'] SCREAMING_SNAKE_CASE_ = test_model.generate(**_A , max_new_tokens=_A) tf.debugging.assert_equal(_A , _A) @slow @require_tensorflow_text def lowerCAmelCase__ ( self): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=_A) class __snake_case ( tf.keras.layers.Layer ): def __init__( self): super().__init__() SCREAMING_SNAKE_CASE_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_A , 'spiece.model') , 'rb').read()) SCREAMING_SNAKE_CASE_ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5') def lowerCAmelCase__ ( self , _A , *_A , **_A): SCREAMING_SNAKE_CASE_ = self.tokenizer.tokenize(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text.pad_model_inputs( _A , max_seq_length=64 , pad_value=self.model.config.pad_token_id) SCREAMING_SNAKE_CASE_ = self.model.generate(input_ids=_A , attention_mask=_A) return self.tokenizer.detokenize(_A) SCREAMING_SNAKE_CASE_ = CompleteSentenceTransformer() SCREAMING_SNAKE_CASE_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs') SCREAMING_SNAKE_CASE_ = complete_model(_A) SCREAMING_SNAKE_CASE_ = tf.keras.Model(_A , _A) keras_model.save(_A) def lowerCAmelCase__ ( self): # Has PT equivalent: this test relies on random sampling SCREAMING_SNAKE_CASE_ = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } SCREAMING_SNAKE_CASE_ = 14 SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 'Hello, my dog is cute and' SCREAMING_SNAKE_CASE_ = tokenizer(_A , return_tensors='tf') SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0'): tf.random.set_seed(0) SCREAMING_SNAKE_CASE_ = model.generate(**_A , eos_token_id=_A , **_A) self.assertTrue(expectation == len(generated_tokens[0])) SCREAMING_SNAKE_CASE_ = [638, 198] with tf.device(':/CPU:0'): tf.random.set_seed(0) SCREAMING_SNAKE_CASE_ = model.generate(**_A , eos_token_id=_A , **_A) self.assertTrue(expectation == len(generated_tokens[0])) def lowerCAmelCase__ ( self): # Has PT equivalent: ample use of framework-specific code SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE_ = 'Hugging Face is a technology company based in New York and Paris.' 
SCREAMING_SNAKE_CASE_ = bart_tokenizer(_A , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE_ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE_ = bart_model.generate(_A).numpy() class __snake_case ( lowerCAmelCase__ ): def lowerCAmelCase__ ( self , _A , _A=None , **_A): return super().call(_A , **_A) SCREAMING_SNAKE_CASE_ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE_ = bart_model.generate(_A , foo='bar').numpy() self.assertTrue(np.array_equal(_A , _A)) class __snake_case ( bart_model.model.encoder.__class__ ): def lowerCAmelCase__ ( self , _A , **_A): return super().call(_A , **_A) SCREAMING_SNAKE_CASE_ = FakeEncoder(bart_model.config , bart_model.model.shared) SCREAMING_SNAKE_CASE_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) SCREAMING_SNAKE_CASE_ = bart_model.generate(_A).numpy() with self.assertRaises(_A): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_A , foo='bar')
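The first test above exercises `tf_top_k_top_p_filtering`, which keeps the top-k logits and then the smallest prefix of probability-sorted logits whose cumulative mass exceeds `top_p`. Below is a framework-agnostic NumPy sketch of the same idea for a single 1-D logits vector; it deliberately omits the `min_tokens_to_keep` refinement the test passes, so treat it as an illustration rather than the library implementation.

import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    """Return a copy of `logits` with filtered positions set to -inf."""
    out = logits.astype(np.float64).copy()
    if top_k > 0:                      # top-k: drop everything below the k-th largest logit
        kth_value = np.sort(out)[-top_k]
        out[out < kth_value] = -np.inf
    order = np.argsort(out)[::-1]      # top-p: sort by probability, keep the nucleus
    probs = np.exp(out[order] - np.max(out))
    probs /= probs.sum()
    cutoff = np.searchsorted(np.cumsum(probs), top_p) + 1
    out[order[cutoff:]] = -np.inf
    return out

print(top_k_top_p_filter(np.array([1.0, 3.0, 2.0, -5.0]), top_k=3, top_p=0.9))
# -> [-inf 3. 2. -inf]: only the nucleus {3.0, 2.0} survives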
711
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
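For readability, here is a de-obfuscated sketch of the coin-change dynamic program above (Project Euler 31). Note that the base case must write to index 0 of the table, which the renamed assignment in the dump obscures, and that iterating coins in the outer loop counts combinations rather than ordered sequences.

def count_coin_ways(pence: int = 200) -> int:
    """Bottom-up count of the ways to make `pence` from UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [0] * (pence + 1)
    ways[0] = 1  # one way to make 0 pence: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]

assert count_coin_ways(200) == 73_682  # matches the assertion in the snippet above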
620
0
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging UpperCamelCase__ : List[str] = logging.get_logger(__name__) class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = 'linear' __lowerCAmelCase : Any = 'cosine' __lowerCAmelCase : List[Any] = 'cosine_with_restarts' __lowerCAmelCase : int = 'polynomial' __lowerCAmelCase : int = 'constant' __lowerCAmelCase : Optional[Any] = 'constant_with_warmup' __lowerCAmelCase : Tuple = 'piecewise_constant' def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" return LambdaLR(_SCREAMING_SNAKE_CASE , lambda _SCREAMING_SNAKE_CASE : 1 , last_epoch=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" def lr_lambda(_SCREAMING_SNAKE_CASE : int ): if current_step < num_warmup_steps: return float(_SCREAMING_SNAKE_CASE ) / float(max(1.0 , _SCREAMING_SNAKE_CASE ) ) return 1.0 return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = step_rules.split(',' ) for rule_str in rule_list[:-1]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = rule_str.split(':' ) SCREAMING_SNAKE_CASE_ = int(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = float(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = value SCREAMING_SNAKE_CASE_ = float(rule_list[-1] ) def create_rules_function(_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ): def rule_func(_SCREAMING_SNAKE_CASE : int ) -> float: SCREAMING_SNAKE_CASE_ = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(_SCREAMING_SNAKE_CASE ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func SCREAMING_SNAKE_CASE_ = create_rules_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any=-1 ): """simple docstring""" def lr_lambda(_SCREAMING_SNAKE_CASE : int ): if current_step < num_warmup_steps: return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 0.5 , _SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" def lr_lambda(_SCREAMING_SNAKE_CASE : Dict ): if current_step < num_warmup_steps: return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_SCREAMING_SNAKE_CASE ) * 2.0 * progress )) ) return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( 
_SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" def lr_lambda(_SCREAMING_SNAKE_CASE : Tuple ): if current_step < num_warmup_steps: return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) ) return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any]=1E-7 , _SCREAMING_SNAKE_CASE : List[str]=1.0 , _SCREAMING_SNAKE_CASE : int=-1 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = optimizer.defaults['lr'] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" ) def lr_lambda(_SCREAMING_SNAKE_CASE : int ): if current_step < num_warmup_steps: return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: SCREAMING_SNAKE_CASE_ = lr_init - lr_end SCREAMING_SNAKE_CASE_ = num_training_steps - num_warmup_steps SCREAMING_SNAKE_CASE_ = 1 - (current_step - num_warmup_steps) / decay_steps SCREAMING_SNAKE_CASE_ = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase__ : str = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, SchedulerType] , _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : float = 1.0 , _SCREAMING_SNAKE_CASE : int = -1 , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SchedulerType(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_SCREAMING_SNAKE_CASE , step_rules=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that 
argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , num_cycles=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , power=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , ) return schedule_func( _SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
712
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index == number_of_items: return 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 ) if weights[index] <= max_weight: SCREAMING_SNAKE_CASE_ = values[index] + knapsack( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
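The recursion above is exponential without caching, because the same (index, remaining weight) states recur. A short memoized variant follows, with an illustrative example; the helper name and numbers are not from the snippet.

from functools import lru_cache

def knapsack_memoized(weights: list, values: list, max_weight: int) -> int:
    """Top-down 0/1 knapsack, memoized over (index, remaining weight)."""
    @lru_cache(maxsize=None)
    def best(index: int, remaining: int) -> int:
        if index == len(weights):
            return 0
        skip = best(index + 1, remaining)        # leave item `index`
        if weights[index] <= remaining:          # or take it, if it fits
            return max(skip, values[index] + best(index + 1, remaining - weights[index]))
        return skip
    return best(0, max_weight)

# Classic example: items (weight, value) = (10, 60), (20, 100), (30, 120).
assert knapsack_memoized([10, 20, 30], [60, 100, 120], max_weight=50) == 220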
620
0
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( lowerCAmelCase__ ): def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ): super().__init__() if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , ) def lowerCAmelCase__ ( self , _A = "auto"): if slice_size == "auto": SCREAMING_SNAKE_CASE_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_A) def lowerCAmelCase__ ( self): self.enable_attention_slicing(_A) @torch.no_grad() def __call__( self , _A , _A=16000 , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ): SCREAMING_SNAKE_CASE_ = self.speech_processor.feature_extractor( _A , return_tensors='pt' , sampling_rate=_A).input_features.to(self.device) SCREAMING_SNAKE_CASE_ = self.speech_model.generate(_A , max_length=480000) SCREAMING_SNAKE_CASE_ = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A)[ 0 ] if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = 1 elif isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = len(_A) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A)}""") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_A , _A) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(_A)}.""") # get prompt text embeddings SCREAMING_SNAKE_CASE_ = self.tokenizer( _A , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""") SCREAMING_SNAKE_CASE_ = text_input_ids[:, : 
self.tokenizer.model_max_length] SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_embeddings.shape SCREAMING_SNAKE_CASE_ = text_embeddings.repeat(1 , _A , 1) SCREAMING_SNAKE_CASE_ = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE_ = 42 if negative_prompt is None: SCREAMING_SNAKE_CASE_ = [''] * batch_size elif type(_A) is not type(_A): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A)} !=""" f""" {type(_A)}.""") elif isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [negative_prompt] elif batch_size != len(_A): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(_A)}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ' the batch size of `prompt`.') else: SCREAMING_SNAKE_CASE_ = negative_prompt SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1] SCREAMING_SNAKE_CASE_ = self.tokenizer( _A , padding='max_length' , max_length=_A , truncation=_A , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE_ = uncond_embeddings.shape[1] SCREAMING_SNAKE_CASE_ = uncond_embeddings.repeat(1 , _A , 1) SCREAMING_SNAKE_CASE_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE_ = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
SCREAMING_SNAKE_CASE_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) SCREAMING_SNAKE_CASE_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps SCREAMING_SNAKE_CASE_ = torch.randn(_A , generator=_A , device='cpu' , dtype=_A).to( self.device) else: SCREAMING_SNAKE_CASE_ = torch.randn(_A , generator=_A , device=self.device , dtype=_A) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""") SCREAMING_SNAKE_CASE_ = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(_A) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE_ = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) SCREAMING_SNAKE_CASE_ = {} if accepts_eta: SCREAMING_SNAKE_CASE_ = eta for i, t in enumerate(self.progress_bar(_A)): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A) # predict the noise residual SCREAMING_SNAKE_CASE_ = self.unet(_A , _A , encoder_hidden_states=_A).sample # perform guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2) SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A , **_A).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_A , _A , _A) SCREAMING_SNAKE_CASE_ = 1 / 0.1_8_2_1_5 * latents SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A) if not return_dict: return image return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A)
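The guidance step buried in the denoising loop above is worth isolating. Here is a minimal sketch of classifier-free guidance as the pipeline applies it: the batch holds the unconditional half first and the text-conditioned half second, and `guidance_scale == 1` reduces to no guidance. The tensor sizes below are dummies.

import torch

def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """Combine unconditional and text-conditioned noise predictions."""
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)  # split along the batch dim
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

dummy = torch.randn(4, 4, 8, 8)  # 2 prompts, duplicated for the two guidance branches
print(apply_classifier_free_guidance(dummy, guidance_scale=7.5).shape)  # torch.Size([2, 4, 8, 8])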
713
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() 
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
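The conversion script above builds (old, new) key pairs and then pops/reinserts each checkpoint entry under its new name. A tiny self-contained sketch of that pattern; the key strings here are made up for illustration and do not come from real SwiftFormer checkpoints.

def rename_keys_in_state_dict(state_dict: dict, rename_pairs: list) -> dict:
    """Pop each old key and reinsert its value under the new key, in place."""
    for old_key, new_key in rename_pairs:
        state_dict[new_key] = state_dict.pop(old_key)
    return state_dict

ckpt = {"patch_embed.0.weight": 1, "network.0.1.dwconv.weight": 2}  # hypothetical entries
pairs = [
    ("patch_embed.0.weight", "swiftformer.patch_embed.patch_embedding.0.weight"),
    ("network.0.1.dwconv.weight", "swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight"),
]
print(sorted(rename_keys_in_state_dict(ckpt, pairs)))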
620
0
'''simple docstring''' import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class __snake_case ( nn.Module ): def __init__( self): super().__init__() SCREAMING_SNAKE_CASE_ = nn.Linear(3 , 4) SCREAMING_SNAKE_CASE_ = nn.BatchNormad(4) SCREAMING_SNAKE_CASE_ = nn.Linear(4 , 5) def lowerCAmelCase__ ( self , _A): return self.lineara(self.batchnorm(self.lineara(_A))) class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , model.state_dict()) SCREAMING_SNAKE_CASE_ = os.path.join(_A , 'index.json') self.assertTrue(os.path.isfile(_A)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: SCREAMING_SNAKE_CASE_ = os.path.join(_A , f"""{key}.dat""") self.assertTrue(os.path.isfile(_A)) # TODO: add tests on the fact weights are properly loaded def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: SCREAMING_SNAKE_CASE_ = torch.randn(2 , 3 , dtype=_A) with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = offload_weight(_A , 'weight' , _A , {}) SCREAMING_SNAKE_CASE_ = os.path.join(_A , 'weight.dat') self.assertTrue(os.path.isfile(_A)) self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A).split('.')[1]}}) SCREAMING_SNAKE_CASE_ = load_offloaded_weight(_A , index['weight']) self.assertTrue(torch.equal(_A , _A)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ModelForTest() SCREAMING_SNAKE_CASE_ = model.state_dict() SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'linear2' not in k} SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'linear2' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , _A) SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A) # Every key is there with the right value self.assertEqual(sorted(_A) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_A , weight_map[key])) SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'weight' in k} SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'weight' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , _A) SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A) # Every key is there with the right value self.assertEqual(sorted(_A) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_A , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(_A , _A) # Duplicates are removed SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A) # Every key is there with the right value self.assertEqual(sorted(_A) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(_A , weight_map[key])) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = {'a.1': 0, 'a.10': 1, 'a.2': 2} SCREAMING_SNAKE_CASE_ = extract_submodules_state_dict(_A , ['a.1', 'a.2']) self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2}) SCREAMING_SNAKE_CASE_ = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2} SCREAMING_SNAKE_CASE_ = extract_submodules_state_dict(_A , ['a.1', 'a.2']) self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 
2})
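The last test above pins down a subtle point: selecting submodule 'a.1' must keep 'a.1' and 'a.1.a' but not 'a.10.a', so naive prefix matching is not enough. Below is a minimal dot-boundary filter consistent with those assertions; it mirrors the accelerate helper's observable behavior but is an illustrative reimplementation, not its source.

def extract_submodules_state_dict(state_dict: dict, submodule_names: list) -> dict:
    """Keep keys that equal a requested name or live strictly under it."""
    return {
        key: value
        for key, value in state_dict.items()
        if any(key == name or key.startswith(name + ".") for name in submodule_names)
    }

assert extract_submodules_state_dict({"a.1": 0, "a.10": 1, "a.2": 2}, ["a.1", "a.2"]) == {"a.1": 0, "a.2": 2}
assert extract_submodules_state_dict({"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}, ["a.1", "a.2"]) == {"a.1.a": 0, "a.2.a": 2}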
714
def _UpperCAmelCase ( ): """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= n: SCREAMING_SNAKE_CASE_ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _UpperCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 500 ) if __name__ == "__main__": print(solution())
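The divisor counter above relies on a standard identity: if n = p1^a1 * ... * pk^ak, then n has (a1 + 1) * ... * (ak + 1) divisors, found by trial division up to sqrt(n). A cleaned-up sketch with checks (76,576,500 is the well-known answer to this problem, the first triangle number with more than 500 divisors):

def count_divisors(n: int) -> int:
    """Number of divisors of n via its prime factorization."""
    count = 1
    p = 2
    while p * p <= n:
        multiplicity = 0
        while n % p == 0:
            n //= p
            multiplicity += 1
        count *= multiplicity + 1
        p += 1
    if n > 1:        # one leftover prime factor larger than sqrt(original n)
        count *= 2
    return count

assert count_divisors(28) == 6          # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76_576_500) > 500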
620
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase__ : str = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[Any] = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
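The init file above defers its heavy torch/TF imports through `_LazyModule`, so importing the package stays cheap until a symbol is actually accessed. A generic sketch of the underlying mechanism using module-level `__getattr__` (PEP 562); this shows the general pattern only, not the `_LazyModule` implementation itself, and the file and attribute names are hypothetical.

# lazy_pkg/__init__.py -- hypothetical package demonstrating lazy attribute loading.
import importlib

_LAZY_ATTRS = {
    # attribute name -> submodule that defines it
    "GroupViTConfig": ".configuration_groupvit",
    "GroupViTModel": ".modeling_groupvit",
}

def __getattr__(name: str):
    """Import the defining submodule only when the attribute is first accessed."""
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")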
715
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = 
pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contains the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
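One transformation the JSON builder above performs is worth seeing in isolation: turning a list of JSON records into the column-major dict that Arrow expects, taking the union of all keys and padding missing fields with None. A minimal pure-Python sketch (the function name is illustrative):

def rows_to_columns(rows: list) -> dict:
    """Transpose a list of JSON records into column-major form for Arrow."""
    keys = set().union(*(row.keys() for row in rows))
    return {col: [row.get(col) for row in rows] for col in keys}

columns = rows_to_columns([{"a": 1, "b": 2}, {"a": 3}])
print(columns["a"])  # [1, 3]
print(columns["b"])  # [2, None]  -- missing fields become None via dict.get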
620
0
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = SpeechTaTokenizer __lowerCAmelCase : int = False __lowerCAmelCase : int = True def lowerCAmelCase__ ( self): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE_ = SpeechTaTokenizer(_A) SCREAMING_SNAKE_CASE_ = AddedToken('<mask>' , lstrip=_A , rstrip=_A) SCREAMING_SNAKE_CASE_ = mask_token tokenizer.add_special_tokens({'mask_token': mask_token}) tokenizer.add_tokens(['<ctc_blank>']) tokenizer.save_pretrained(self.tmpdirname) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = 'this is a test' SCREAMING_SNAKE_CASE_ = 'this is a test' return input_text, output_text def lowerCAmelCase__ ( self , _A , _A=False , _A=20 , _A=5): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_input_output_texts(_A) SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A) SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A) return text, ids def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = '<pad>' SCREAMING_SNAKE_CASE_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-4] , 'œ') self.assertEqual(vocab_keys[-2] , '<mask>') self.assertEqual(vocab_keys[-1] , '<ctc_blank>') self.assertEqual(len(_A) , 81) def lowerCAmelCase__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 79) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_tokenizers(do_lower_case=_A) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}"""): SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size SCREAMING_SNAKE_CASE_ = len(_A) self.assertNotEqual(_A , 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE_ = ['aaaaa bbbbbb', 'cccccccccdddddddd'] SCREAMING_SNAKE_CASE_ = tokenizer.add_tokens(_A) SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size SCREAMING_SNAKE_CASE_ = len(_A) self.assertNotEqual(_A , 0) self.assertEqual(_A , _A) self.assertEqual(_A , len(_A)) self.assertEqual(_A , all_size + len(_A)) SCREAMING_SNAKE_CASE_ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_A) self.assertGreaterEqual(len(_A) , 4) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) SCREAMING_SNAKE_CASE_ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} SCREAMING_SNAKE_CASE_ = tokenizer.add_special_tokens(_A) SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size SCREAMING_SNAKE_CASE_ = len(_A) self.assertNotEqual(_A , 0) self.assertEqual(_A , _A) self.assertEqual(_A , len(_A)) self.assertEqual(_A , all_size_a + len(_A)) SCREAMING_SNAKE_CASE_ = 
tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_A) self.assertGreaterEqual(len(_A) , 6) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[0] , tokens[1]) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokens[-4]) self.assertEqual(tokens[0] , tokenizer.eos_token_id) self.assertEqual(tokens[-3] , tokenizer.pad_token_id) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('This is a test') # fmt: off self.assertListEqual(_A , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(_A) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( _A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_A) # fmt: off self.assertListEqual(_A , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: on SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_A) self.assertListEqual( _A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) @slow def lowerCAmelCase__ ( self): # Use custom sequence because this tokenizer does not handle numbers. SCREAMING_SNAKE_CASE_ = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off SCREAMING_SNAKE_CASE_ = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_A , )
716
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM

@require_torch
class __snake_case :
    def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ):
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = decoder_seq_length
        # For common tests
        SCREAMING_SNAKE_CASE_ = self.decoder_seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_attention_mask
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = d_model
        SCREAMING_SNAKE_CASE_ = d_model
        SCREAMING_SNAKE_CASE_ = decoder_layers
        SCREAMING_SNAKE_CASE_ = decoder_layers
        SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
        SCREAMING_SNAKE_CASE_ = decoder_attention_heads
        SCREAMING_SNAKE_CASE_ = decoder_attention_heads
        SCREAMING_SNAKE_CASE_ = eos_token_id
        SCREAMING_SNAKE_CASE_ = bos_token_id
        SCREAMING_SNAKE_CASE_ = pad_token_id
        SCREAMING_SNAKE_CASE_ = decoder_start_token_id
        SCREAMING_SNAKE_CASE_ = use_cache
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = decoder_seq_length
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = 1
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
        SCREAMING_SNAKE_CASE_ = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2)
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
        SCREAMING_SNAKE_CASE_ = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def lowerCAmelCase__ ( self , _A , _A , _A , _A , ):
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval()
        SCREAMING_SNAKE_CASE_ = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A)
        SCREAMING_SNAKE_CASE_ = model(_A)
        SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A)
        self.parent.assertTrue(len(_A) == len(_A))
        self.parent.assertTrue(len(_A) == len(_A) + 1)
        SCREAMING_SNAKE_CASE_ = outputs['past_key_values']
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1
        # append to next input_ids and
        SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1)
        SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state']
        SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state']
        # select random slice
        SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
        SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(_A , _A , atol=1E-3)
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

@require_torch
class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
    __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    __lowerCAmelCase : Any = True
    __lowerCAmelCase : str = False
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A)
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A)
    def lowerCAmelCase__ ( self):
        pass
    def lowerCAmelCase__ ( self):
        pass
    def lowerCAmelCase__ ( self):
        pass
    def lowerCAmelCase__ ( self):
        self.config_tester.run_common_tests()
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*_A)
    def lowerCAmelCase__ ( self):
        return
    @unittest.skip('The model doesn\'t support left padding')  # and it's not used enough to be worth fixing :)
    def lowerCAmelCase__ ( self):
        pass
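# --- Usage sketch (illustrative only, not part of the sample above): driving the
# standalone TrOCR decoder that this tester exercises. Assumes the public
# `TrOCRConfig`/`TrOCRForCausalLM` classes; the tiny hyperparameters are arbitrary.
import torch
from transformers import TrOCRConfig, TrOCRForCausalLM

tiny_config = TrOCRConfig(
    vocab_size=99, d_model=16, decoder_layers=2, decoder_attention_heads=4,
    decoder_ffn_dim=4, max_position_embeddings=30,
)
tiny_model = TrOCRForCausalLM(tiny_config).eval()
prompt = torch.tensor([[1, 5, 7]])  # arbitrary token ids inside the tiny vocab
generated = tiny_model.generate(prompt, max_length=10, do_sample=False)
print(generated.shape)  # (1, <=10)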
620
0
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends

if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ):
    """simple docstring"""
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Optional[str] , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    SCREAMING_SNAKE_CASE_ = to_pil_image(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pil_image.size
    SCREAMING_SNAKE_CASE_ = pytesseract.image_to_data(_SCREAMING_SNAKE_CASE , lang=_SCREAMING_SNAKE_CASE , output_type='dict' , config=_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    SCREAMING_SNAKE_CASE_ = [idx for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if not word.strip()]
    SCREAMING_SNAKE_CASE_ = [word for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    SCREAMING_SNAKE_CASE_ = []
    for x, y, w, h in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        SCREAMING_SNAKE_CASE_ = [x, y, x + w, y + h]
        actual_boxes.append(_SCREAMING_SNAKE_CASE )
    # finally, normalize the bounding boxes
    SCREAMING_SNAKE_CASE_ = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
    assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes

class __snake_case ( lowerCAmelCase__ ):
    __lowerCAmelCase : str = ['pixel_values']
    def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = None , _A = "" , **_A , ):
        super().__init__(**_A)
        SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 224, 'width': 224}
        SCREAMING_SNAKE_CASE_ = get_size_dict(_A)
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = resample
        SCREAMING_SNAKE_CASE_ = apply_ocr
        SCREAMING_SNAKE_CASE_ = ocr_lang
        SCREAMING_SNAKE_CASE_ = tesseract_config
    def lowerCAmelCase__ ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
        SCREAMING_SNAKE_CASE_ = get_size_dict(_A)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        SCREAMING_SNAKE_CASE_ = (size['height'], size['width'])
        return resize(_A , size=_A , resample=_A , data_format=_A , **_A)
    def lowerCAmelCase__ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
        SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ = get_size_dict(_A)
        SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ = apply_ocr if apply_ocr is not None else self.apply_ocr
        SCREAMING_SNAKE_CASE_ = ocr_lang if ocr_lang is not None else self.ocr_lang
        SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else self.tesseract_config
        SCREAMING_SNAKE_CASE_ = make_list_of_images(_A)
        if not valid_images(_A):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ = [to_numpy_array(_A) for image in images]
        if apply_ocr:
            requires_backends(self , 'pytesseract')
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = []
            for image in images:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = apply_tesseract(_A , _A , _A)
                words_batch.append(_A)
                boxes_batch.append(_A)
        if do_resize:
            SCREAMING_SNAKE_CASE_ = [self.resize(image=_A , size=_A , resample=_A) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        SCREAMING_SNAKE_CASE_ = [flip_channel_order(_A) for image in images]
        SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_A , _A) for image in images]
        SCREAMING_SNAKE_CASE_ = BatchFeature(data={'pixel_values': images} , tensor_type=_A)
        if apply_ocr:
            SCREAMING_SNAKE_CASE_ = words_batch
            SCREAMING_SNAKE_CASE_ = boxes_batch
        return data
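# --- Usage sketch (illustrative only): this module mirrors the LayoutLMv2-style image
# processor shipped with transformers. Assuming the public `LayoutLMv2ImageProcessor`
# class and a local Tesseract install, a typical preprocessing call looks like this;
# the blank image is a stand-in for a real scanned page.
from PIL import Image
from transformers import LayoutLMv2ImageProcessor

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
page = Image.new("RGB", (640, 480), "white")  # placeholder document image
encoding = processor(page, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, 224, 224), channels flipped to BGR
print(encoding["words"], encoding["boxes"])  # OCR words and 0-1000 normalized boxes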
717
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer

@dataclass
class __snake_case ( lowerCAmelCase__ ):
    __lowerCAmelCase : torch.FloatTensor

class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ):
    @register_to_config
    def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ):
        super().__init__()
        # pass init params to Encoder
        SCREAMING_SNAKE_CASE_ = Encoder(
            in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
        SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1)
        SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A)
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1)
        # pass init params to Decoder
        SCREAMING_SNAKE_CASE_ = Decoder(
            in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , )
    @apply_forward_hook
    def lowerCAmelCase__ ( self , _A , _A = True):
        SCREAMING_SNAKE_CASE_ = self.encoder(_A)
        SCREAMING_SNAKE_CASE_ = self.quant_conv(_A)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=_A)
    @apply_forward_hook
    def lowerCAmelCase__ ( self , _A , _A = False , _A = True):
        # also go through quantization layer
        if not force_not_quantize:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A)
        else:
            SCREAMING_SNAKE_CASE_ = h
        SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A)
        SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)
    def lowerCAmelCase__ ( self , _A , _A = True):
        SCREAMING_SNAKE_CASE_ = sample
        SCREAMING_SNAKE_CASE_ = self.encode(_A).latents
        SCREAMING_SNAKE_CASE_ = self.decode(_A).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)
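# --- Usage sketch (illustrative only): this chunk appears to mirror diffusers' public
# `VQModel`. Assuming that class, a tiny encode -> quantize -> decode round trip:
import torch
from diffusers import VQModel

vq = VQModel()  # defaults: 3 image channels, 3 latent channels, 256 codebook entries
x = torch.randn(1, 3, 32, 32)
latents = vq.encode(x).latents    # continuous latents from encoder + quant_conv
recon = vq.decode(latents).sample  # nearest-codebook lookup happens inside decode
print(latents.shape, recon.shape)  # (1, 3, 32, 32) twice with a single down block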
620
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase__ : List[Any] = logging.get_logger(__name__)

UpperCamelCase__ : List[str] = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}

class __snake_case ( lowerCAmelCase__ ):
    __lowerCAmelCase : Any = 'biogpt'
    def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ):
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = layer_norm_eps
        SCREAMING_SNAKE_CASE_ = scale_embedding
        SCREAMING_SNAKE_CASE_ = use_cache
        SCREAMING_SNAKE_CASE_ = layerdrop
        SCREAMING_SNAKE_CASE_ = activation_dropout
        super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
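# --- Usage sketch (illustrative only): instantiating a scaled-down model from this
# config. Assumes the public `BioGptConfig`/`BioGptForCausalLM` classes; the small
# sizes below are arbitrary and only keep the example cheap to run.
from transformers import BioGptConfig, BioGptForCausalLM

small_config = BioGptConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
small_model = BioGptForCausalLM(small_config)
print(f"parameters: {sum(p.numel() for p in small_model.parameters()):,}")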
718
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)

if is_bnb_available():
    import bitsandbytes as bnb

UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    SCREAMING_SNAKE_CASE_ = []
    # custom device map
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
        SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
    # compatibility with peft
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.' )
        SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
        # convert param to the right dtype
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' )
                    SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
                param.to(_SCREAMING_SNAKE_CASE )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        SCREAMING_SNAKE_CASE_ = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = special_dtypes
        SCREAMING_SNAKE_CASE_ = no_split_module_classes
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            SCREAMING_SNAKE_CASE_ = get_balanced_memory(
                _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        SCREAMING_SNAKE_CASE_ = max_memory
        SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        # check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        SCREAMING_SNAKE_CASE_ = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
    """simple docstring"""
    if modules_to_not_convert is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = False
    for name, module in model.named_children():
        if current_key_name is None:
            SCREAMING_SNAKE_CASE_ = []
        current_key_name.append(_SCREAMING_SNAKE_CASE )
        if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ = True
            for key in modules_to_not_convert:
                if ((key in current_key_name_str) and (key + "." in current_key_name_str)) or key == current_key_name_str:
                    SCREAMING_SNAKE_CASE_ = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                SCREAMING_SNAKE_CASE_ = module.weight.data
                if module.bias is not None:
                    SCREAMING_SNAKE_CASE_ = module.bias.data
                bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
                setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                SCREAMING_SNAKE_CASE_ = True
        if len(list(module.children() ) ) > 0:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    with init_empty_weights():
        SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
    # For compatibility with Accelerate < 0.18
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] )
    SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0
    # Check if it is a base model
    SCREAMING_SNAKE_CASE_ = False
    if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
        SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    SCREAMING_SNAKE_CASE_ = list(model.named_children() )
    SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
    # add last module together with tied weights
    SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
    # remove ".weight" from the keys
    SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
    SCREAMING_SNAKE_CASE_ = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' )
        filtered_module_names.append(_SCREAMING_SNAKE_CASE )
    return filtered_module_names

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ):
    """simple docstring"""
    for m in model.modules():
        if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
            return True
    return False

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ):
    """simple docstring"""
    return next(parameter.parameters() ).device

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
    """simple docstring"""
    if fpaa_statistics is None:
        set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = param_name
        SCREAMING_SNAKE_CASE_ = model
        if "." in tensor_name:
            SCREAMING_SNAKE_CASE_ = tensor_name.split('.' )
            for split in splits[:-1]:
                SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                SCREAMING_SNAKE_CASE_ = new_module
            SCREAMING_SNAKE_CASE_ = splits[-1]
        # offload weights
        SCREAMING_SNAKE_CASE_ = False
        offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
    else:
        offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
        offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
    set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
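# --- Usage sketch (illustrative only): the intended entry point for the helpers above
# is accelerate's `load_and_quantize_model`. A minimal 8-bit sketch assuming that
# public API; the checkpoint name and weights path are placeholders, and a CUDA GPU
# is required.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m"))
bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized = load_and_quantize_model(
    empty_model,
    bnb_config,
    weights_location="/path/to/opt-350m/checkpoint",  # placeholder folder of weights
    device_map="auto",
)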
620
0
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch

if is_faiss_available():
    import faiss

@require_faiss
class __snake_case ( lowerCAmelCase__ ):
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = 8
        # DPR tok
        SCREAMING_SNAKE_CASE_ = [
            '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest',
        ]
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'dpr_tokenizer')
        os.makedirs(_A , exist_ok=_A)
        SCREAMING_SNAKE_CASE_ = os.path.join(_A , DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        # BART tok
        SCREAMING_SNAKE_CASE_ = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>',
        ]
        SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A))))
        SCREAMING_SNAKE_CASE_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        SCREAMING_SNAKE_CASE_ = {'unk_token': '<unk>'}
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'bart_tokenizer')
        os.makedirs(_A , exist_ok=_A)
        SCREAMING_SNAKE_CASE_ = os.path.join(_A , BART_VOCAB_FILES_NAMES['vocab_file'])
        SCREAMING_SNAKE_CASE_ = os.path.join(_A , BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(_A) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(_A))
    def lowerCAmelCase__ ( self):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer'))
    def lowerCAmelCase__ ( self):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer'))
    def lowerCAmelCase__ ( self):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer'))
    def lowerCAmelCase__ ( self):
        shutil.rmtree(self.tmpdirname)
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
        SCREAMING_SNAKE_CASE_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
            SCREAMING_SNAKE_CASE_ = dataset
            SCREAMING_SNAKE_CASE_ = RagRetriever(
                _A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever
    def lowerCAmelCase__ ( self , _A):
        SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
        SCREAMING_SNAKE_CASE_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
        if from_disk:
            SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'dataset')
            SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'index.faiss')
            dataset.get_index('embeddings').save(os.path.join(self.tmpdirname , 'index.faiss'))
            dataset.drop_index('embeddings')
            dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset'))
            del dataset
            SCREAMING_SNAKE_CASE_ = RagRetriever(
                _A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            SCREAMING_SNAKE_CASE_ = RagRetriever(
                _A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _A) , )
        return retriever
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT)
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index')
        dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr')
        pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb'))
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl')
        SCREAMING_SNAKE_CASE_ = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(_A , open(_A , 'wb'))
        SCREAMING_SNAKE_CASE_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
        SCREAMING_SNAKE_CASE_ = RagRetriever(
            _A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
        SCREAMING_SNAKE_CASE_ = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=_A)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(_A) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']) , _A)
        self.assertEqual(doc_dicts[0]['id'][0] , '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
                SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
                retriever.save_pretrained(_A)
                SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(_A)
                self.assertIsInstance(_A , _A)
                SCREAMING_SNAKE_CASE_ = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
                SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=1)
                self.assertTrue(out is not None)
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=_A)
        SCREAMING_SNAKE_CASE_ = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=_A)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(_A) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']) , _A)
        self.assertEqual(doc_dicts[0]['id'][0] , '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=_A)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(_A)
            self.assertIsInstance(_A , _A)
            SCREAMING_SNAKE_CASE_ = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
            SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=1)
            self.assertTrue(out is not None)
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=_A)
        SCREAMING_SNAKE_CASE_ = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=_A)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(_A) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']) , _A)
        self.assertEqual(doc_dicts[0]['id'][0] , '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=_A)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(_A)
            self.assertIsInstance(_A , _A)
            SCREAMING_SNAKE_CASE_ = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
            SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=1)
            self.assertTrue(out is not None)
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
        SCREAMING_SNAKE_CASE_ = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=_A)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(_A) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ['text', 'title'])
        self.assertEqual(len(doc_dicts[0]['text']) , _A)
        self.assertEqual(doc_dicts[0]['text'][0] , 'bar')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0] , 'foo')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(_A)
            self.assertIsInstance(_A , _A)
            SCREAMING_SNAKE_CASE_ = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
            SCREAMING_SNAKE_CASE_ = retriever.retrieve(_A , n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def lowerCAmelCase__ ( self):
        import torch
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
        SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
        SCREAMING_SNAKE_CASE_ = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_ = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(_A , _A)
        self.assertIsInstance(_A , _A)
        self.assertIsInstance(_A , np.ndarray)
        SCREAMING_SNAKE_CASE_ = retriever(
            _A , _A , prefix=retriever.config.generator.prefix , n_docs=_A , return_tensors='pt' , )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(_A , torch.Tensor)
        self.assertIsInstance(_A , torch.Tensor)
        self.assertIsInstance(_A , torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=_A)
        retriever.set_ctx_encoder_tokenizer(_A)
        SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
        SCREAMING_SNAKE_CASE_ = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_ = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A)
        self.assertEqual(
            len(_A) , 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask')) , _A)  # check for doc token related keys in dictionary.
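# --- Usage sketch (illustrative only): outside the test harness, a retriever is
# usually built from a pretrained RAG checkpoint. Assumes the public `RagRetriever`
# API; the dummy-dataset flag keeps the index download small.
from transformers import RagRetriever

retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)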
719
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)

UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ : List[str] = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

UpperCamelCase__ : str = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}

@lru_cache()
def _UpperCAmelCase ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    SCREAMING_SNAKE_CASE_ = bs[:]
    SCREAMING_SNAKE_CASE_ = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(_SCREAMING_SNAKE_CASE )
            cs.append(2**8 + n )
            n += 1
    SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
    return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ = set()
    SCREAMING_SNAKE_CASE_ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        SCREAMING_SNAKE_CASE_ = char
    return pairs

class __snake_case ( lowerCAmelCase__ ):
    __lowerCAmelCase : str = VOCAB_FILES_NAMES
    __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask']
    def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ):
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
        super().__init__(
            errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
        with open(_A , encoding='utf-8') as vocab_handle:
            SCREAMING_SNAKE_CASE_ = json.load(_A)
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
        SCREAMING_SNAKE_CASE_ = errors  # how to handle errors in decoding
        SCREAMING_SNAKE_CASE_ = bytes_to_unicode()
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()}
        with open(_A , encoding='utf-8') as merges_handle:
            SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1]
        SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges]
        SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A))))
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
    @property
    def lowerCAmelCase__ ( self):
        return len(self.encoder)
    def lowerCAmelCase__ ( self):
        return dict(self.encoder , **self.added_tokens_encoder)
    def lowerCAmelCase__ ( self , _A):
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE_ = tuple(_A)
        SCREAMING_SNAKE_CASE_ = get_pairs(_A)
        if not pairs:
            return token
        while True:
            SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = 0
            while i < len(_A):
                try:
                    SCREAMING_SNAKE_CASE_ = word.index(_A , _A)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    SCREAMING_SNAKE_CASE_ = j
                if word[i] == first and i < len(_A) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            SCREAMING_SNAKE_CASE_ = tuple(_A)
            SCREAMING_SNAKE_CASE_ = new_word
            if len(_A) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE_ = get_pairs(_A)
        SCREAMING_SNAKE_CASE_ = ' '.join(_A)
        SCREAMING_SNAKE_CASE_ = word
        return word
    def lowerCAmelCase__ ( self , _A):
        SCREAMING_SNAKE_CASE_ = []
        for token in re.findall(self.pat , _A):
            SCREAMING_SNAKE_CASE_ = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' '))
        return bpe_tokens
    def lowerCAmelCase__ ( self , _A):
        return self.encoder.get(_A , self.encoder.get(self.unk_token))
    def lowerCAmelCase__ ( self , _A):
        return self.decoder.get(_A)
    def lowerCAmelCase__ ( self , _A):
        SCREAMING_SNAKE_CASE_ = ''.join(_A)
        SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
        return text
    def lowerCAmelCase__ ( self , _A , _A = None):
        if not os.path.isdir(_A):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(_A , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n')
        SCREAMING_SNAKE_CASE_ = 0
        with open(_A , 'w' , encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    SCREAMING_SNAKE_CASE_ = token_index
                writer.write(' '.join(_A) + '\n')
                index += 1
        return vocab_file, merge_file
    def lowerCAmelCase__ ( self , _A , _A = None):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def lowerCAmelCase__ ( self , _A , _A = None , _A = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
        if token_ids_a is None:
            return [1] + ([0] * len(_A)) + [1]
        return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1]
    def lowerCAmelCase__ ( self , _A , _A = None):
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    def lowerCAmelCase__ ( self , _A , _A=False , **_A):
        SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()):
            SCREAMING_SNAKE_CASE_ = ' ' + text
        return (text, kwargs)
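# --- Usage sketch (illustrative only): round-tripping text through the byte-level BPE
# tokenizer above, assuming the public `facebook/bart-base` checkpoint. BOS/EOS ids
# are added by the `build_inputs_with_special_tokens` logic shown above.
from transformers import BartTokenizer

tok = BartTokenizer.from_pretrained("facebook/bart-base")
ids = tok("Hello world").input_ids  # [0, ..., 2]: <s> tokens </s>
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids, skip_special_tokens=True))  # 'Hello world'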
620
0
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = AudioLDMPipeline __lowerCAmelCase : Optional[int] = TEXT_TO_AUDIO_PARAMS __lowerCAmelCase : Union[str, Any] = TEXT_TO_AUDIO_BATCH_PARAMS __lowerCAmelCase : Union[str, Any] = frozenset( [ 'num_inference_steps', 'num_waveforms_per_prompt', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , ) SCREAMING_SNAKE_CASE_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) SCREAMING_SNAKE_CASE_ = ClapTextModelWithProjection(_A) SCREAMING_SNAKE_CASE_ = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77) SCREAMING_SNAKE_CASE_ = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , ) SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(_A) SCREAMING_SNAKE_CASE_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def lowerCAmelCase__ ( self , _A , _A=0): if str(_A).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) 
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) == 256 SCREAMING_SNAKE_CASE_ = audio[:10] SCREAMING_SNAKE_CASE_ = np.array( [-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * [inputs['prompt']] # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('prompt')] SCREAMING_SNAKE_CASE_ = audioldm_pipe.tokenizer( _A , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = text_inputs['input_ids'].to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.text_encoder( _A , ) SCREAMING_SNAKE_CASE_ = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE_ = F.normalize(_A , dim=-1) SCREAMING_SNAKE_CASE_ = prompt_embeds # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE_ = negative_prompt SCREAMING_SNAKE_CASE_ = 3 * [inputs['prompt']] # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('prompt')] SCREAMING_SNAKE_CASE_ = [] for p in [prompt, negative_prompt]: SCREAMING_SNAKE_CASE_ = audioldm_pipe.tokenizer( _A , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = text_inputs['input_ids'].to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.text_encoder( _A , ) SCREAMING_SNAKE_CASE_ = text_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE_ = F.normalize(_A , dim=-1) embeds.append(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = embeds # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_A) SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 'egg cracking' SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A , negative_prompt=_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert 
audio.ndim == 1 assert len(_A) == 256 SCREAMING_SNAKE_CASE_ = audio[:10] SCREAMING_SNAKE_CASE_ = np.array( [-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_A) SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.vocoder.config.sampling_rate SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(audio_length_in_s=0.0_1_6 , **_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) / vocoder_sampling_rate == 0.0_1_6 SCREAMING_SNAKE_CASE_ = audioldm_pipe(audio_length_in_s=0.0_3_2 , **_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) / vocoder_sampling_rate == 0.0_3_2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = ['hey'] SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=1) SCREAMING_SNAKE_CASE_ = output.audios.shape assert audio_shape == (1, 256) SCREAMING_SNAKE_CASE_ = audioldm_pipe.vocoder.config config.model_in_dim *= 2 SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(_A).to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=1) SCREAMING_SNAKE_CASE_ = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def lowerCAmelCase__ ( self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A) def lowerCAmelCase__ ( self): self._test_inference_batch_single_identical(test_mean_pixel_difference=_A) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCAmelCase__ ( self): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A) @slow class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self , _A , _A="cpu" , _A=torch.floataa , _A=0): SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = np.random.RandomState(_A).standard_normal((1, 8, 128, 16)) SCREAMING_SNAKE_CASE_ = torch.from_numpy(_A).to(device=_A , dtype=_A) SCREAMING_SNAKE_CASE_ = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AudioLDMPipeline.from_pretrained('cvssp/audioldm') SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_inputs(_A) SCREAMING_SNAKE_CASE_ = 25 SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A).audios[0] assert audio.ndim == 1 assert len(_A) == 81920 SCREAMING_SNAKE_CASE_ = audio[77230:77240] SCREAMING_SNAKE_CASE_ = np.array( [-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5]) SCREAMING_SNAKE_CASE_ = np.abs(expected_slice - audio_slice).max() assert max_diff < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AudioLDMPipeline.from_pretrained('cvssp/audioldm') SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_inputs(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A).audios[0] assert audio.ndim == 1 assert len(_A) == 81920 SCREAMING_SNAKE_CASE_ = audio[27780:27790] SCREAMING_SNAKE_CASE_ = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2]) SCREAMING_SNAKE_CASE_ = np.abs(expected_slice - audio_slice).max() assert max_diff < 3E-2
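The prompt-embeds tests above rebuild the pipeline's text path by hand, and the one easy-to-miss step is the extra L2 normalization applied to each projected hidden state. A minimal, self-contained sketch of that step (the tensor shapes are illustrative, not taken from the tests):

import torch
import torch.nn.functional as F

# Toy stand-in for CLAP text projections: a batch of 3 prompts, 32-dim each.
prompt_embeds = torch.randn(3, 32)
normalized = F.normalize(prompt_embeds, dim=-1)

# Every row now has unit L2 norm, matching the manual normalization the
# prompt-embeds tests perform before handing embeddings to the pipeline.
assert torch.allclose(normalized.norm(dim=-1), torch.ones(3), atol=1e-6)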
720
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'dpr' def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ): super().__init__(pad_token_id=_A , **_A) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = position_embedding_type
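A hypothetical usage sketch for the config above (assumes a standard `transformers` install): the default `projection_dim` of 0 is interpreted by the DPR encoders as "return the raw [CLS] hidden state, with no projection layer on top".

from transformers import DPRConfig

config = DPRConfig(projection_dim=128)
print(config.hidden_size, config.projection_dim)  # 768 128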
620
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=32 , _A=True , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size_divisor SCREAMING_SNAKE_CASE_ = do_rescale def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = GLPNImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = GLPNImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size_divisor')) self.assertTrue(hasattr(_A , 'resample')) self.assertTrue(hasattr(_A , 'do_rescale')) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , 
return_tensors='pt').pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
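The shape assertions above only check divisibility; the rounding itself is simple. A hypothetical helper (not the library's implementation) showing how a resolution is floored to the nearest multiple of `size_divisor`:

def round_down_to_divisor(height: int, width: int, size_divisor: int = 32) -> tuple[int, int]:
    # Floor each spatial dimension to a multiple of size_divisor, so the
    # `shape % size_divisor == 0` assertions above hold for the output.
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

print(round_down_to_divisor(401, 638))  # (384, 608)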
721
import pytest import datasets # Import fixture modules as plugins UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' ) def _UpperCAmelCase ( ): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
620
0
"""simple docstring""" from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) # TODO Update this a_ = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class snake_case ( _UpperCamelCase): __UpperCamelCase = 'esm' def __init__( self : Optional[Any] , a__ : List[Any]=None , a__ : int=None , a__ : List[Any]=None , a__ : Optional[int]=7_68 , a__ : str=12 , a__ : str=12 , a__ : str=30_72 , a__ : List[str]=0.1 , a__ : Tuple=0.1 , a__ : List[Any]=10_26 , a__ : Any=0.0_2 , a__ : Any=1E-1_2 , a__ : List[str]="absolute" , a__ : Optional[Any]=True , a__ : Union[str, Any]=None , a__ : Dict=False , a__ : Dict=False , a__ : Any=None , a__ : str=None , **a__ : Optional[int] , ) -> int: '''simple docstring''' super().__init__(pad_token_id=a__ , mask_token_id=a__ , **a__ ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = emb_layer_norm_before _A = token_dropout _A = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("No esmfold_config supplied for folding model, using default values." ) _A = EsmFoldConfig() elif isinstance(a__ , a__ ): _A = EsmFoldConfig(**a__ ) _A = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) _A = get_default_vocab_list() else: _A = vocab_list else: _A = None _A = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , a__ ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" 
) def a_ ( self : int ) -> int: '''simple docstring''' _A = super().to_dict() if isinstance(self.esmfold_config , a__ ): _A = self.esmfold_config.to_dict() return output @dataclass class snake_case : __UpperCamelCase = None __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 0 __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = 128 __UpperCamelCase = None def a_ ( self : str ) -> Optional[int]: '''simple docstring''' if self.trunk is None: _A = TrunkConfig() elif isinstance(self.trunk , a__ ): _A = TrunkConfig(**self.trunk ) def a_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _A = asdict(self ) _A = self.trunk.to_dict() return output @dataclass class snake_case : __UpperCamelCase = 48 __UpperCamelCase = 1024 __UpperCamelCase = 128 __UpperCamelCase = 32 __UpperCamelCase = 32 __UpperCamelCase = 32 __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = False __UpperCamelCase = 4 __UpperCamelCase = 128 __UpperCamelCase = None def a_ ( self : str ) -> Union[str, Any]: '''simple docstring''' if self.structure_module is None: _A = StructureModuleConfig() elif isinstance(self.structure_module , a__ ): _A = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_head_width != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got" F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" ) if self.pairwise_state_dim % self.pairwise_head_width != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got" F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" ) _A = self.sequence_state_dim // self.sequence_head_width _A = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def a_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _A = asdict(self ) _A = self.structure_module.to_dict() return output @dataclass class snake_case : __UpperCamelCase = 384 __UpperCamelCase = 128 __UpperCamelCase = 16 __UpperCamelCase = 128 __UpperCamelCase = 12 __UpperCamelCase = 4 __UpperCamelCase = 8 __UpperCamelCase = 0.1 __UpperCamelCase = 8 __UpperCamelCase = 1 __UpperCamelCase = 2 __UpperCamelCase = 7 __UpperCamelCase = 10 __UpperCamelCase = 1e-8 __UpperCamelCase = 1e5 def a_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' return asdict(self ) def a__ ( ) -> int: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
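A worked instance of the consistency rule the trunk validation above enforces (values are the dataclass defaults): the state dimension must factor exactly into `num_heads * head_width`.

sequence_state_dim, sequence_head_width = 1024, 32
sequence_num_heads = sequence_state_dim // sequence_head_width  # 32
assert sequence_state_dim % sequence_head_width == 0
assert sequence_state_dim == sequence_num_heads * sequence_head_width  # 1024 == 32 * 32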
621
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
1
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} a_ = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } a_ = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } a_ = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } a_ = { "facebook/dpr-ctx_encoder-single-nq-base": 5_12, "facebook/dpr-ctx_encoder-multiset-base": 5_12, } a_ = { "facebook/dpr-question_encoder-single-nq-base": 5_12, "facebook/dpr-question_encoder-multiset-base": 5_12, } a_ = { "facebook/dpr-reader-single-nq-base": 5_12, "facebook/dpr-reader-multiset-base": 5_12, } a_ = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } a_ = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } a_ = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( "DPRSpanPrediction", ["span_score", 
"relevance_score", "doc_id", "start_index", "end_index", "text"] ) a_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) a_ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(_UpperCamelCase) class snake_case : def __call__( self : Optional[int] , a__ : Dict , a__ : Optional[str] = None , a__ : Optional[str] = None , a__ : Union[bool, str] = False , a__ : Union[bool, str] = False , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , **a__ : int , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , ) elif titles is None or texts is None: _A = titles if texts is None else texts return super().__call__( a__ , a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , ) _A = titles if not isinstance(a__ , a__ ) else [titles] _A = texts if not isinstance(a__ , a__ ) else [texts] _A = len(a__ ) _A = questions if not isinstance(a__ , a__ ) else [questions] * n_passages if len(a__ ) != len(a__ ): raise ValueError( F"""There should be as many titles than texts but got {len(a__ )} titles and {len(a__ )} texts.""" ) _A = super().__call__(a__ , a__ , padding=a__ , truncation=a__ )["input_ids"] _A = super().__call__(a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ )["input_ids"] _A = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(a__ , a__ ) ] } if return_attention_mask is not False: _A = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) _A = attention_mask return self.pad(a__ , padding=a__ , max_length=a__ , return_tensors=a__ ) def a_ ( self : Optional[Any] , a__ : BatchEncoding , a__ : DPRReaderOutput , a__ : int = 16 , a__ : int = 64 , a__ : int = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' _A = reader_input["input_ids"] _A , _A , _A = reader_output[:3] _A = len(a__ ) _A = sorted(range(a__ ) , reverse=a__ , key=relevance_logits.__getitem__ ) _A = [] for doc_id in sorted_docs: _A = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence _A = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _A = sequence_ids.index(self.pad_token_id ) else: _A = len(a__ ) _A = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , 
end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=a__ , top_spans=a__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=a__ , start_index=a__ , end_index=a__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(a__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a_ ( self : str , a__ : List[int] , a__ : List[int] , a__ : int , a__ : int , ) -> List[DPRSpanPrediction]: '''simple docstring''' _A = [] for start_index, start_score in enumerate(a__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) _A = sorted(a__ , key=lambda a__ : x[1] , reverse=a__ ) _A = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) _A = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(a__ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_UpperCamelCase) class snake_case ( _UpperCamelCase , _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = ['input_ids', 'attention_mask']
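A pure-Python sketch of the scoring rule inside the span-selection helper above, with made-up logits: every window of length at most `max_answer_length` is scored as `start_logit + end_logit`, and candidates are ranked by that sum before the overlap filtering.

start_logits = [0.1, 2.0, 0.3, 1.5]
end_logits = [0.2, 0.1, 2.5, 0.4]
max_answer_length = 3

candidates = [
    ((start, start + length), start_logits[start] + end_logits[start + length])
    for start in range(len(start_logits))
    for length in range(min(max_answer_length, len(end_logits) - start))
]
best = max(candidates, key=lambda item: item[1])
print(best)  # ((1, 2), 4.5): tokens 1..2 give the highest joint score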
621
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class snake_case ( _UpperCamelCase): def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int: '''simple docstring''' _A = p_stop _A = max_length def __iter__( self : Any ) -> Optional[Any]: '''simple docstring''' _A = 0 _A = False while not stop and count < self.max_length: yield count count += 1 _A = random.random() < self.p_stop class snake_case ( unittest.TestCase): def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]: '''simple docstring''' _A = [ BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ ) for i in range(2 ) ] _A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] ) self.assertListEqual(a__ , a__ ) def a_ ( self : List[Any] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ ) def a_ ( self : int ) -> int: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) def a_ ( self : List[str] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' _A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str: '''simple docstring''' random.seed(a__ ) _A = list(a__ ) _A = [ IterableDatasetShard( a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , ) for i in range(a__ ) ] _A = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(a__ ) iterable_dataset_lists.append(list(a__ ) ) _A = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _A = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(a__ ) , len(a__ ) ) self.assertTrue(len(a__ ) % shard_batch_size == 0 ) _A = [] for idx in range(0 , len(a__ ) , a__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(a__ ) < len(a__ ): reference += reference self.assertListEqual(a__ , reference[: len(a__ )] ) def a_ ( self : List[str] ) -> List[Any]: '''simple docstring''' _A = 42 _A = RandomIterableDataset() self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) # Edge case with a very small dataset _A = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> Dict: '''simple docstring''' _A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ ) _A = SkipBatchSampler(a__ , 2 ) self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Optional[int]: '''simple docstring''' _A = DataLoader(list(range(16 ) ) , batch_size=4 ) _A = skip_first_batches(a__ , 
num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _A = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def a_ ( self : int ) -> int: '''simple docstring''' Accelerator() _A = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
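A pure-Python sketch (no `accelerate` import) of the sharding the batch-sampler tests above check: with 2 processes, process `i` takes every second batch, and with `even_batches=True` the short shard wraps around to the start of the sampler.

# 21 samples, batch_size=3, drop_last=False -> 7 batches.
batches = [list(range(i, i + 3)) for i in range(0, 21, 3)]
shards = [batches[i::2] for i in range(2)]

# even_batches=True: pad the short shard by cycling from the beginning.
while len(shards[1]) < len(shards[0]):
    shards[1].append(batches[0])

print(shards[0])  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(shards[1])  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]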
621
1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: _A = {} _A = 2 while True: _A = factor_map.pop(__lowercase , __lowercase ) if factor: _A = factor + prime while x in factor_map: x += factor _A = factor else: _A = prime yield prime prime += 1 def a__ ( __lowercase = 1E10 ) -> int: _A = sieve() _A = 1 while True: _A = next(__lowercase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(__lowercase ) n += 2 if __name__ == "__main__": print(solution())
621
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device a_ = False class snake_case ( unittest.TestCase): pass @nightly @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : Optional[int] ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a__ ) _A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = generator.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = "cyberpunk 2077" _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = "A painting of a squirrel eating a burger " _A = torch.manual_seed(0 ) _A = pipe.text_to_image( prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
621
1
"""simple docstring""" from __future__ import annotations def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> None: if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): _A , _A = array[indexa], array[indexa] def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> None: if length > 1: _A = int(length / 2 ) for i in range(__lowercase , low + middle ): comp_and_swap(__lowercase , __lowercase , i + middle , __lowercase ) bitonic_merge(__lowercase , __lowercase , __lowercase , __lowercase ) bitonic_merge(__lowercase , low + middle , __lowercase , __lowercase ) def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> None: if length > 1: _A = int(length / 2 ) bitonic_sort(__lowercase , __lowercase , __lowercase , 1 ) bitonic_sort(__lowercase , low + middle , __lowercase , 0 ) bitonic_merge(__lowercase , __lowercase , __lowercase , __lowercase ) if __name__ == "__main__": a_ = input("Enter numbers separated by a comma:\n").strip() a_ = [int(item.strip()) for item in user_input.split(",")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("\nSorted array in ascending order is: ", end="") print(*unsorted, sep=", ") bitonic_merge(unsorted, 0, len(unsorted), 0) print("Sorted array in descending order is: ", end="") print(*unsorted, sep=", ")
621
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a_ = logging.get_logger(__name__) @dataclass class snake_case : __UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())}) __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'}) def a_ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _A = self.task_name.lower() class snake_case ( _UpperCamelCase): __UpperCamelCase = 'train' __UpperCamelCase = 'dev' __UpperCamelCase = 'test' class snake_case ( _UpperCamelCase): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple: '''simple docstring''' warnings.warn( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , ) _A = args _A = glue_processors[args.task_name]() _A = glue_output_modes[args.task_name] if isinstance(a__ , a__ ): try: _A = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) # Load data features from cache or dataset file _A = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) _A = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) _A , _A = label_list[2], label_list[1] _A = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_A = cached_features_file + ".lock" with FileLock(a__ ): if os.path.exists(a__ ) and not args.overwrite_cache: _A = time.time() _A = torch.load(a__ ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: _A = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: _A = self.processor.get_test_examples(args.data_dir ) else: _A = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: _A = examples[:limit_length] _A = glue_convert_examples_to_features( a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , ) _A = time.time() torch.save(self.features , a__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : List[Any] ) -> Any: '''simple docstring''' return len(self.features ) def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures: '''simple docstring''' return self.features[i] def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' return self.label_list
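The lock-then-cache dance above is a general pattern worth isolating. A minimal sketch (requires the `filelock` package; the path is illustrative): only the first process builds the features, everyone else loads the cached file.

import os

import torch
from filelock import FileLock

cache_file = "/tmp/cached_features.pt"  # hypothetical path
with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        features = torch.load(cache_file)
    else:
        features = [0, 1, 2]  # stand-in for the expensive featurization
        torch.save(features, cache_file)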
621
1
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} a_ = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } a_ = { "allenai/longformer-base-4096": 40_96, "allenai/longformer-large-4096": 40_96, "allenai/longformer-large-4096-finetuned-triviaqa": 40_96, "allenai/longformer-base-4096-extra.pos.embd.only": 40_96, "allenai/longformer-large-4096-extra.pos.embd.only": 40_96, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def a__ ( ) -> Tuple: _A = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) _A = bs[:] _A = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 _A = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def a__ ( __lowercase ) -> Union[str, Any]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] , a__ : Optional[Any] , a__ : Optional[int] , a__ : Any="replace" , a__ : Optional[int]="<s>" , a__ : Optional[int]="</s>" , a__ : Union[str, Any]="</s>" , a__ : Optional[Any]="<s>" , a__ : Tuple="<unk>" , a__ : int="<pad>" , a__ : Union[str, Any]="<mask>" , a__ : Any=False , **a__ : Union[str, Any] , ) -> Optional[int]: '''simple docstring''' _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else bos_token _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else eos_token _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else sep_token _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else cls_token _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else unk_token _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token super().__init__( errors=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , **a__ , ) with open(a__ , encoding="utf-8" ) as vocab_handle: _A = json.load(a__ ) _A = {v: k for k, v in self.encoder.items()} _A = errors # how to handle errors in decoding _A = bytes_to_unicode() _A = {v: k for k, v in self.byte_encoder.items()} with open(a__ , encoding="utf-8" ) as merges_handle: _A = merges_handle.read().split("\n" )[1:-1] _A = [tuple(merge.split() ) for merge in bpe_merges] _A = dict(zip(a__ , range(len(a__ ) ) ) ) _A = {} _A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _A = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def a_ ( self : Optional[Any] ) -> str: '''simple docstring''' return len(self.encoder ) def a_ ( self : int ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def a_ ( self : List[str] , a__ : Union[str, Any] ) -> List[str]: '''simple docstring''' if token in self.cache: return self.cache[token] _A = tuple(a__ ) _A = get_pairs(a__ ) if not pairs: return token while True: _A = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(a__ ): try: _A = word.index(a__ , a__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(a__ ) _A = new_word if len(a__ ) == 1: break else: _A = get_pairs(a__ ) _A = " 
".join(a__ ) _A = word return word def a_ ( self : str , a__ : List[str] ) -> str: '''simple docstring''' _A = [] for token in re.findall(self.pat , a__ ): _A = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a__ ).split(" " ) ) return bpe_tokens def a_ ( self : Optional[int] , a__ : Any ) -> Any: '''simple docstring''' return self.encoder.get(a__ , self.encoder.get(self.unk_token ) ) def a_ ( self : Tuple , a__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' return self.decoder.get(a__ ) def a_ ( self : Tuple , a__ : Dict ) -> Dict: '''simple docstring''' _A = "".join(a__ ) _A = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(a__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _A = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(a__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + "\n" ) _A = 0 with open(a__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) _A = token_index writer.write(" ".join(a__ ) + "\n" ) index += 1 return vocab_file, merge_file def a_ ( self : Tuple , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self : Tuple , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ ) if token_ids_a is None: return [1] + ([0] * len(a__ )) + [1] return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1] def a_ ( self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a_ ( self : Dict , a__ : List[str] , a__ : Dict=False , **a__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a__ ) > 0 and not text[0].isspace()): _A = " " + text return (text, kwargs)
"""simple docstring""" def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str: # Return True if there is node that has not iterated. _A = [False] * len(__lowercase ) _A = [] queue.append(__lowercase ) _A = True while queue: _A = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowercase ) _A = True _A = u return visited[t] def a__ ( __lowercase , __lowercase , __lowercase ) -> int: # This array is filled by BFS and to store path _A = [-1] * (len(__lowercase )) _A = 0 while bfs(__lowercase , __lowercase , __lowercase , __lowercase ): _A = float("Inf" ) _A = sink while s != source: # Find the minimum value in select path _A = min(__lowercase , graph[parent[s]][s] ) _A = parent[s] max_flow += path_flow _A = sink while v != source: _A = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _A = parent[v] return max_flow a_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] a_ , a_ = 0, 5 print(ford_fulkerson(graph, source, sink))
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=1024 ) -> Any: _A , _A = [], [] _A = list(zip(__lowercase , __lowercase ) ) _A , _A = sorted_examples[0] def is_too_big(__lowercase ): return tok(__lowercase , return_tensors="pt" ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): _A = new_src + " " + src _A = new_tgt + " " + tgt if is_too_big(__lowercase ) or is_too_big(__lowercase ): # cant fit, finalize example finished_src.append(__lowercase ) finished_tgt.append(__lowercase ) _A , _A = src, tgt else: # can fit, keep adding _A , _A = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(__lowercase ) finished_tgt.append(__lowercase ) return finished_src, finished_tgt def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = Path(__lowercase ) save_path.mkdir(exist_ok=__lowercase ) for split in ["train"]: _A , _A = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" _A = [x.rstrip() for x in Path(__lowercase ).open().readlines()] _A = [x.rstrip() for x in Path(__lowercase ).open().readlines()] _A , _A = pack_examples(__lowercase , __lowercase , __lowercase , __lowercase ) print(f"""packed {split} split from {len(__lowercase )} examples -> {len(__lowercase )}.""" ) Path(save_path / f"""{split}.source""" ).open("w" ).write("\n".join(__lowercase ) ) Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(__lowercase ) ) for split in ["val", "test"]: _A , _A = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(__lowercase , save_path / f"""{split}.source""" ) shutil.copyfile(__lowercase , save_path / f"""{split}.target""" ) def a__ ( ) -> Tuple: _A = argparse.ArgumentParser() parser.add_argument("--tok_name" , type=__lowercase , help="like facebook/bart-large-cnn,t5-base, etc." ) parser.add_argument("--max_seq_len" , type=__lowercase , default=128 ) parser.add_argument("--data_dir" , type=__lowercase ) parser.add_argument("--save_path" , type=__lowercase ) _A = parser.parse_args() _A = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(__lowercase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) a_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", 
"decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]: _A = state_dict.pop(__lowercase ) _A = val def a__ ( __lowercase ) -> List[str]: _A = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _A = value else: _A = value return new_state_dict def a__ ( __lowercase , __lowercase=False ) -> Any: _A = "" if is_panoptic: _A = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:256, :] _A = in_proj_bias[:256] _A = in_proj_weight[256:512, :] _A = in_proj_bias[256:512] _A = in_proj_weight[-256:, :] _A = in_proj_bias[-256:] def a__ ( ) -> int: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def a__ ( __lowercase , __lowercase ) -> Any: _A = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _A = "resnet101" if "dc5" in model_name: _A = True _A = "panoptic" in model_name if is_panoptic: _A = 250 else: _A = 91 _A = "huggingface/label-files" _A = "coco-detection-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load image processor _A = "coco_panoptic" if is_panoptic else "coco_detection" _A = ConditionalDetrImageProcessor(format=__lowercase ) # prepare image _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ) _A = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub _A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval() _A = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _A = "conditional_detr." + src rename_key(__lowercase , __lowercase , __lowercase ) _A = rename_backbone_keys(__lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowercase , is_panoptic=__lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _A = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): _A = state_dict.pop(__lowercase ) _A = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _A = state_dict.pop(__lowercase ) _A = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: _A = state_dict.pop(__lowercase ) _A = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _A = state_dict.pop(__lowercase ) _A = val # finally, create HuggingFace model and load state dict _A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase ) model.load_state_dict(__lowercase ) model.eval() model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion _A = conditional_detr(__lowercase ) _A = model(__lowercase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) a_ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
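# Example invocation of the conversion script above (hedged: the script
# filename and the dump folder are illustrative; the model name is one the
# script itself recognizes):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50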
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def a__ ( __lowercase ) -> Optional[Any]: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e00 and cp <= 0x9_fff) or (cp >= 0x3_400 and cp <= 0x4_dbf) # or (cp >= 0x20_000 and cp <= 0x2a_6df) # or (cp >= 0x2a_700 and cp <= 0x2b_73f) # or (cp >= 0x2b_740 and cp <= 0x2b_81f) # or (cp >= 0x2b_820 and cp <= 0x2c_eaf) # or (cp >= 0xf_900 and cp <= 0xf_aff) or (cp >= 0x2f_800 and cp <= 0x2f_a1f) # ): # return True return False def a__ ( __lowercase ) -> List[str]: # word like '180' or '身高' or '神' for char in word: _A = ord(__lowercase ) if not _is_chinese_char(__lowercase ): return 0 return 1 def a__ ( __lowercase ) -> Dict: _A = set() for token in tokens: _A = len(__lowercase ) > 1 and is_chinese(__lowercase ) if chinese_word: word_set.add(__lowercase ) _A = list(__lowercase ) return word_list def a__ ( __lowercase , __lowercase ) -> List[str]: if not chinese_word_set: return bert_tokens _A = max([len(__lowercase ) for w in chinese_word_set] ) _A = bert_tokens _A , _A = 0, len(__lowercase ) while start < end: _A = True if is_chinese(bert_word[start] ): _A = min(end - start , __lowercase ) for i in range(__lowercase , 1 , -1 ): _A = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _A = "##" + bert_word[j] _A = start + i _A = False break if single_word: start += 1 return bert_word def a__ ( __lowercase , __lowercase , __lowercase ) -> List[Any]: _A = [] for i in range(0 , len(__lowercase ) , 100 ): _A = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws _A = [get_chinese_word(__lowercase ) for r in res] ltp_res.extend(__lowercase ) assert len(__lowercase ) == len(__lowercase ) _A = [] for i in range(0 , len(__lowercase ) , 100 ): _A = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowercase , truncation=__lowercase , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__lowercase ) == len(__lowercase ) _A = [] for input_ids, chinese_word in zip(__lowercase , __lowercase ): _A = [] for id in input_ids: _A = bert_tokenizer._convert_id_to_token(__lowercase ) input_tokens.append(__lowercase ) _A = add_sub_symbol(__lowercase , __lowercase ) _A = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__lowercase ): if token[:2] == "##": _A = token[2:] # save chinese tokens' pos if len(__lowercase ) == 1 and _is_chinese_char(ord(__lowercase ) ): ref_id.append(__lowercase ) ref_ids.append(__lowercase ) assert len(__lowercase ) == len(__lowercase ) return ref_ids def a__ ( __lowercase ) -> Union[str, Any]: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , "r" , encoding="utf-8" ) as f: _A = f.readlines() _A = [line.strip() for line in data if len(__lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _A = LTP(args.ltp ) # faster in GPU device _A = BertTokenizer.from_pretrained(args.bert ) _A = prepare_ref(__lowercase , __lowercase , __lowercase ) with open(args.save_path , "w" , encoding="utf-8" ) as f: _A = [json.dumps(__lowercase ) + "\n" for ref in ref_ids] f.writelines(__lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) a_ = parser.parse_args() main(args)
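# Example invocation of the whole-word-masking reference script above (hedged:
# the script filename is illustrative; the paths mirror the argparse defaults,
# and the LTP / BERT resources must be fetched separately):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt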
"""simple docstring""" import random def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = a[left_index] _A = left_index + 1 for j in range(left_index + 1 , __lowercase ): if a[j] < pivot: _A , _A = a[i], a[j] i += 1 _A , _A = a[i - 1], a[left_index] return i - 1 def a__ ( __lowercase , __lowercase , __lowercase ) -> int: if left < right: _A = random.randint(__lowercase , right - 1 ) _A , _A = ( a[left], a[pivot], ) # switches the pivot with the left most bound _A = partition(__lowercase , __lowercase , __lowercase ) quick_sort_random( __lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point quick_sort_random( __lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point def a__ ( ) -> Dict: _A = input("Enter numbers separated by a comma:\n" ).strip() _A = [int(__lowercase ) for item in user_input.split("," )] quick_sort_random(__lowercase , 0 , len(__lowercase ) ) print(__lowercase ) if __name__ == "__main__": main()
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device a_ = False class snake_case ( unittest.TestCase): pass @nightly @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : Optional[int] ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a__ ) _A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = generator.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = "cyberpunk 2077" _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = "A painting of a squirrel eating a burger " _A = torch.manual_seed(0 ) _A = pipe.text_to_image( prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): __UpperCamelCase = ['input_features'] def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str: '''simple docstring''' super().__init__( feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , ) _A = n_fft _A = hop_length _A = chunk_length _A = chunk_length * sampling_rate _A = self.n_samples // hop_length _A = sampling_rate _A = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : int , a__ : np.array ) -> np.ndarray: '''simple docstring''' _A = spectrogram( a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) _A = log_spec[:, :-1] _A = np.maximum(a__ , log_spec.max() - 8.0 ) _A = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: _A = np.array(a__ , np.intaa ) _A = [] for vector, length in zip(a__ , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(a__ ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) _A = is_batched_numpy or ( isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(a__ , np.ndarray ): _A = np.asarray(a__ , dtype=np.floataa ) elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A = [np.asarray([raw_speech] ).T] _A = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding _A = self.pad( a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _A = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) _A = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format _A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) _A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]] if isinstance(input_features[0] , a__ ): _A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features] else: _A = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _A = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: _A = padded_inputs.convert_to_tensors(a__ ) return padded_inputs def a_ ( self : Dict ) -> Dict[str, Any]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
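# Hedged usage sketch for the feature extractor above, assuming it corresponds
# to transformers' WhisperFeatureExtractor; the one-second sine wave is
# illustrative input, not real speech.
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
wave = np.sin(2 * np.pi * 440.0 * np.arange(16000) / 16000).astype(np.float32)
feats = extractor(wave, sampling_rate=16000, return_tensors="np")
print(feats["input_features"].shape)  # expected (1, 80, 3000): 80 mel bins, padded to 30 s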
"""simple docstring""" from __future__ import annotations def a__ ( __lowercase , __lowercase ) -> float: _A = sorted(numsa + numsa ) _A , _A = divmod(len(__lowercase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() a_ = [float(x) for x in input("Enter the elements of first array: ").split()] a_ = [float(x) for x in input("Enter the elements of second array: ").split()] print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", "Salesforce/blip-vqa-capfit-large": ( "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json" ), "Salesforce/blip-image-captioning-base": ( "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json" ), "Salesforce/blip-image-captioning-large": ( "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json" ), "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", "Salesforce/blip-itm-large-flikr": ( "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json" ), } class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip_text_model' def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , ) _A = vocab_size _A = hidden_size _A = encoder_hidden_size _A = intermediate_size _A = projection_dim _A = hidden_dropout_prob _A = num_hidden_layers _A = num_attention_heads _A = max_position_embeddings _A = layer_norm_eps _A = hidden_act _A = initializer_range _A = attention_probs_dropout_prob _A = is_decoder _A = use_cache @classmethod def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(a__ ) _A , _A = cls.get_config_dict(a__ , **a__ ) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _A = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a__ , **a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip_vision_model' def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]: '''simple docstring''' super().__init__(**a__ ) _A = hidden_size _A = intermediate_size _A = projection_dim _A = num_hidden_layers _A = num_attention_heads _A = patch_size _A = image_size _A = initializer_range _A = attention_dropout _A = layer_norm_eps _A = hidden_act @classmethod def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(a__ ) _A , _A = cls.get_config_dict(a__ , **a__ ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _A = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a__ , **a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip' __UpperCamelCase = True def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict: '''simple docstring''' super().__init__(**a__ ) if text_config is None: _A = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." ) if vision_config is None: _A = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." ) _A = BlipTextConfig(**a__ ) _A = BlipVisionConfig(**a__ ) _A = self.vision_config.hidden_size _A = projection_dim _A = logit_scale_init_value _A = 1.0 _A = 0.0_2 _A = image_text_hidden_size @classmethod def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ ) def a_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.text_config.to_dict() _A = self.vision_config.to_dict() _A = self.__class__.model_type return output
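# Hedged sketch of composing the three configs above, assuming they correspond
# to transformers' BlipTextConfig / BlipVisionConfig / BlipConfig; the override
# values are illustrative, not recommended settings.
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_cfg = BlipTextConfig(hidden_size=256, num_hidden_layers=4)
vision_cfg = BlipVisionConfig(hidden_size=256, num_hidden_layers=4)
cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.projection_dim, cfg.text_config.hidden_size)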
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class snake_case ( unittest.TestCase , _UpperCamelCase): def a_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _A = load_tool("text-classification" ) self.tool.setup() _A = load_tool("text-classification" , remote=a__ ) def a_ ( self : Optional[int] ) -> Dict: '''simple docstring''' _A = self.tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Optional[Any] ) -> Dict: '''simple docstring''' _A = self.remote_tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' _A = self.tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Dict ) -> Any: '''simple docstring''' _A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(a__ , "positive" )
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class snake_case ( tf.keras.layers.Layer): def __init__( self : str , a__ : Dict[str, int] , a__ : List[str] , a__ : int = None , a__ : int = None ) -> Optional[Any]: '''simple docstring''' super().__init__() _A = pad_token_id _A = max_length _A = vocab _A = merges _A = BytePairTokenizer(a__ , a__ , sequence_length=a__ ) @classmethod def a_ ( cls : Optional[Any] , a__ : GPTaTokenizer , *a__ : Dict , **a__ : Tuple ) -> Union[str, Any]: '''simple docstring''' _A = [" ".join(a__ ) for m in tokenizer.bpe_ranks.keys()] _A = tokenizer.get_vocab() return cls(a__ , a__ , *a__ , **a__ ) @classmethod def a_ ( cls : List[str] , a__ : Union[str, os.PathLike] , *a__ : Tuple , **a__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _A = GPTaTokenizer.from_pretrained(a__ , *a__ , **a__ ) return cls.from_tokenizer(a__ , *a__ , **a__ ) @classmethod def a_ ( cls : List[Any] , a__ : int ) -> Dict: '''simple docstring''' return cls(**a__ ) def a_ ( self : int ) -> int: '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def a_ ( self : Optional[Any] , a__ : int , a__ : int = None ) -> Optional[Any]: '''simple docstring''' _A = self.tf_tokenizer(a__ ) _A = tf.ones_like(a__ ) if self.pad_token_id is not None: # pad the tokens up to max length _A = max_length if max_length is not None else self.max_length if max_length is not None: _A , _A = pad_model_inputs( a__ , max_seq_length=a__ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
621
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = StableDiffusionInpaintPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __UpperCamelCase = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCamelCase = frozenset([]) def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , ) _A = PNDMScheduler(skip_prk_steps=a__ ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _A = CLIPTextModel(a__ ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int: '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ ) _A = image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) ) _A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) ) if str(a__ ).startswith("mps" ): _A = torch.manual_seed(a__ ) else: _A = torch.Generator(device=a__ ).manual_seed(a__ ) _A = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def a_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = StableDiffusionInpaintPipeline(**a__ ) _A = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _A = self.get_dummy_inputs(a__ ) _A = sd_pipe(**a__ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array([0.4_7_2_7, 
0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a_ ( self : str ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : List[Any] ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = StableDiffusionInpaintPipeline.from_pretrained( a__ , torch_dtype=torch.floataa , safety_checker=a__ , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" ) _A = StableDiffusionInpaintPipeline.from_pretrained( a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ) _A = torch.cuda.max_memory_allocated() 
# make sure that less than 2.65 GB is allocated assert mem_bytes < 2.6_5 * 10**9
621
1
"""simple docstring""" def a__ ( __lowercase = 1000 ) -> int: return sum(e for e in range(3 , __lowercase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(f'''{solution() = }''')
621
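A quicker O(1) closed-form variant of solution above, included as an illustrative sketch; the helper name arithmetic_series_sum and the inclusion-exclusion layout are mine, not part of the dataset row:

def arithmetic_series_sum(k: int, limit: int) -> int:
    # sum of k, 2k, 3k, ... strictly below `limit`, via the arithmetic-series formula
    n = (limit - 1) // k
    return k * n * (n + 1) // 2

def solution_closed_form(limit: int = 1000) -> int:
    # inclusion-exclusion: add multiples of 3 and of 5, subtract multiples of 15
    return (
        arithmetic_series_sum(3, limit)
        + arithmetic_series_sum(5, limit)
        - arithmetic_series_sum(15, limit)
    )

assert solution_closed_form(1000) == 233168  # known Project Euler #1 answer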
"""simple docstring""" def a__ ( __lowercase , __lowercase ) -> int: while a != 0: _A , _A = b % a, a return b def a__ ( __lowercase , __lowercase ) -> int: if gcd(__lowercase , __lowercase ) != 1: _A = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(__lowercase ) _A , _A , _A = 1, 0, a _A , _A , _A = 0, 1, m while va != 0: _A = ua // va _A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
621
1
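A few hedged sanity checks for mod_inverse above; the expected values follow directly from the defining identity a * mod_inverse(a, m) % m == 1:

assert mod_inverse(3, 11) == 4    # 3 * 4 == 12 ≡ 1 (mod 11)
assert mod_inverse(7, 26) == 15   # 7 * 15 == 105 ≡ 1 (mod 26)
try:
    mod_inverse(4, 8)             # gcd(4, 8) == 4, so no inverse exists
except ValueError:
    pass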
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { "configuration_bigbird_pegasus": [ "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdPegasusConfig", "BigBirdPegasusOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST", "BigBirdPegasusForCausalLM", "BigBirdPegasusForConditionalGeneration", "BigBirdPegasusForQuestionAnswering", "BigBirdPegasusForSequenceClassification", "BigBirdPegasusModel", "BigBirdPegasusPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class snake_case ( _UpperCamelCase): def __init__( self : List[Any] , a__ : Any ) -> Any: '''simple docstring''' _A = data def __iter__( self : List[str] ) -> str: '''simple docstring''' for element in self.data: yield element def a__ ( __lowercase=True ) -> Tuple: _A = Accelerator(even_batches=__lowercase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]: if iterable: _A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) ) else: _A = TensorDataset(torch.as_tensor(range(__lowercase ) ) ) _A = DataLoader(__lowercase , batch_size=__lowercase ) _A = accelerator.prepare(__lowercase ) return dl def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict: _A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase ) _A = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def a__ ( ) -> List[str]: _A = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def a__ ( ) -> List[Any]: _A = create_accelerator(even_batches=__lowercase ) verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def a__ ( ) -> int: _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__lowercase ): _A = ddp_model(batch[0].float() ) _A = output.sum() loss.backward() batch_idxs.append(__lowercase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def a__ ( 
__lowercase ) -> List[str]: with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for multi-GPU" in str(w[-1].message ) def a__ ( ) -> Tuple: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = train_dl.batch_sampler.even_batches _A = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> int: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> Optional[Any]: _A = create_accelerator() _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for map-style datasets" in str(w[-1].message ) def a__ ( ) -> Optional[Any]: _A = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) _A = accelerator.state.distributed_type _A = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__lowercase ) _A = original_state if __name__ == "__main__": main()
621
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { "caidas/swin2sr-classicalsr-x2-64": ( "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json" ), } class snake_case ( _UpperCamelCase): __UpperCamelCase = 'swin2sr' __UpperCamelCase = { 'hidden_size': 'embed_dim', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[Any] , a__ : str=64 , a__ : int=1 , a__ : Any=3 , a__ : List[str]=1_80 , a__ : List[str]=[6, 6, 6, 6, 6, 6] , a__ : Dict=[6, 6, 6, 6, 6, 6] , a__ : Optional[Any]=8 , a__ : Dict=2.0 , a__ : List[Any]=True , a__ : int=0.0 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.1 , a__ : Dict="gelu" , a__ : Any=False , a__ : List[Any]=0.0_2 , a__ : Any=1E-5 , a__ : int=2 , a__ : Optional[int]=1.0 , a__ : Dict="1conv" , a__ : List[Any]="pixelshuffle" , **a__ : Any , ) -> str: '''simple docstring''' super().__init__(**a__ ) _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = len(a__ ) _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = layer_norm_eps _A = initializer_range _A = upscale _A = img_range _A = resi_connection _A = upsampler
621
"""simple docstring""" class snake_case : def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]: '''simple docstring''' _A = None _A = None _A = graph self._normalize_graph(a__ , a__ ) _A = len(a__ ) _A = None def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict: '''simple docstring''' if sources is int: _A = [sources] if sinks is int: _A = [sinks] if len(a__ ) == 0 or len(a__ ) == 0: return _A = sources[0] _A = sinks[0] # make fake vertex if there are more # than one source or sink if len(a__ ) > 1 or len(a__ ) > 1: _A = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _A = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _A = max_input_flow _A = 0 _A = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _A = max_input_flow _A = size - 1 def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str: '''simple docstring''' _A = algorithm(self ) class snake_case : def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]: '''simple docstring''' _A = flow_network _A = flow_network.verticesCount _A = flow_network.sourceIndex _A = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _A = flow_network.graph _A = False def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' if not self.executed: self._algorithm() _A = True def a_ ( self : Any ) -> int: '''simple docstring''' pass class snake_case ( _UpperCamelCase): def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]: '''simple docstring''' super().__init__(a__ ) # use this to save your result _A = -1 def a_ ( self : Any ) -> List[str]: '''simple docstring''' if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class snake_case ( _UpperCamelCase): def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict: '''simple docstring''' super().__init__(a__ ) _A = [[0] * self.verticies_count for i in range(self.verticies_count )] _A = [0] * self.verticies_count _A = [0] * self.verticies_count def a_ ( self : Any ) -> Dict: '''simple docstring''' _A = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _A = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _A = 0 while i < len(a__ ): _A = vertices_list[i] _A = self.heights[vertex_index] self.process_vertex(a__ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(a__ ) ) _A = 0 else: i += 1 _A = sum(self.preflow[self.source_index] ) def a_ ( self : Dict , a__ : Any ) -> Optional[int]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(a__ , a__ ) self.relabel(a__ ) def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]: '''simple docstring''' _A = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def a_ ( self : Any , a__ : Dict ) -> Any: '''simple docstring''' _A = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _A = self.heights[to_index] if min_height is not None: _A = min_height + 1 if __name__ == "__main__": a_ = [0] a_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network a_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate a_ = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
621
1
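As a cross-check for the push-relabel result above, a minimal BFS-based Edmonds-Karp sketch (entirely my own helper, not part of the FlowNetwork classes), run on the same capacity matrix; both should report a maximum flow of 6:

from collections import deque

def edmonds_karp(capacity: list, source: int, sink: int) -> int:
    # repeatedly find a shortest augmenting path with BFS and saturate it
    n = len(capacity)
    residual = [row[:] for row in capacity]
    max_flow = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if residual[u][v] > 0 and parent[v] == -1:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path is left
            return max_flow
        # walk back to find the bottleneck, then push that much flow
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        max_flow += bottleneck

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
assert edmonds_karp(graph, 0, 3) == 6  # the 6-capacity edge 1 -> 2 is the bottleneck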
"""simple docstring""" import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a_ = "▁" a_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class snake_case ( _UpperCamelCase , unittest.TestCase): __UpperCamelCase = BertGenerationTokenizer __UpperCamelCase = False __UpperCamelCase = True def a_ ( self : Any ) -> Optional[int]: '''simple docstring''' super().setUp() _A = BertGenerationTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def a_ ( self : Dict ) -> Dict: '''simple docstring''' _A = "<s>" _A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def a_ ( self : int ) -> int: '''simple docstring''' _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(a__ ) , 10_02 ) def a_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def a_ ( self : List[str] ) -> Tuple: '''simple docstring''' _A = BertGenerationTokenizer(a__ , keep_accents=a__ ) _A = tokenizer.tokenize("This is a test" ) self.assertListEqual(a__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [2_85, 46, 10, 1_70, 3_82] , ) _A = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _A = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _A = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def a_ ( self : str ) -> List[Any]: '''simple docstring''' return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) @slow def a_ ( self : List[Any] ) -> Tuple: '''simple docstring''' _A = "Hello World!" _A = [1_85_36, 22_60, 1_01] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def a_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _A = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) _A = [ 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, ] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @require_torch @slow def a_ ( self : Dict ) -> List[str]: '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence _A = list(self.big_tokenizer.get_vocab().keys() )[:10] _A = " ".join(a__ ) _A = self.big_tokenizer.encode_plus(a__ , return_tensors="pt" , return_token_type_ids=a__ ) _A = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=a__ ) _A = BertGenerationConfig() _A = BertGenerationEncoder(a__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a__ ) model(**a__ ) @slow def a_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' _A = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
621
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, 
globals()["__file__"], _import_structure, module_spec=__spec__)
621
1
"""simple docstring""" import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger a_ = get_logger(__name__) a_ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n" class snake_case : @add_start_docstrings(a__ ) def __call__( self : Tuple , a__ : jnp.ndarray , a__ : jnp.ndarray ) -> jnp.ndarray: '''simple docstring''' raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class snake_case : @add_start_docstrings(a__ ) def __call__( self : Any , a__ : jnp.ndarray , a__ : jnp.ndarray ) -> jnp.ndarray: '''simple docstring''' raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class snake_case ( _UpperCamelCase): @add_start_docstrings(a__ ) def __call__( self : Optional[int] , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int , **a__ : str ) -> jnp.ndarray: '''simple docstring''' for processor in self: _A = inspect.signature(processor.__call__ ).parameters if len(a__ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F"""Make sure that all the required parameters: {list(function_args.keys() )} for """ F"""{processor.__class__} are passed to the logits processor.""" ) _A = processor(a__ , a__ , a__ , **a__ ) else: _A = processor(a__ , a__ , a__ ) return scores class snake_case ( _UpperCamelCase): def __init__( self : Optional[Any] , a__ : float ) -> int: '''simple docstring''' if not isinstance(a__ , a__ ) or not (temperature > 0): raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" ) _A = temperature def __call__( self : str , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A = scores / self.temperature return scores class snake_case ( _UpperCamelCase): def __init__( self : Any , a__ : float , a__ : float = -float("Inf" ) , a__ : int = 1 ) -> List[Any]: '''simple docstring''' if not isinstance(a__ , a__ ) or (top_p < 0 or top_p > 1.0): raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" ) if not isinstance(a__ , a__ ) or (min_tokens_to_keep < 1): raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" ) _A = top_p _A = filter_value _A = min_tokens_to_keep def __call__( self : str , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A , _A = lax.top_k(a__ , scores.shape[-1] ) _A = jnp.full_like(a__ , self.filter_value ) _A = jax.nn.softmax(a__ , axis=-1 ).cumsum(axis=-1 ) _A = cumulative_probs < self.top_p # include the token that is higher than top_p as well _A = jnp.roll(a__ , 1 ) score_mask |= 
score_mask.at[:, 0].set(a__ ) # min tokens to keep _A = score_mask.at[:, : self.min_tokens_to_keep].set(a__ ) _A = jnp.where(a__ , a__ , a__ ) _A = jax.lax.sort_key_val(a__ , a__ )[-1] return next_scores class snake_case ( _UpperCamelCase): def __init__( self : Union[str, Any] , a__ : int , a__ : float = -float("Inf" ) , a__ : int = 1 ) -> Optional[Any]: '''simple docstring''' if not isinstance(a__ , a__ ) or top_k <= 0: raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" ) _A = max(a__ , a__ ) _A = filter_value def __call__( self : Union[str, Any] , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A , _A = scores.shape _A = jnp.full(batch_size * vocab_size , self.filter_value ) _A = min(self.top_k , scores.shape[-1] ) # Safety check _A , _A = lax.top_k(a__ , a__ ) _A = jnp.broadcast_to((jnp.arange(a__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() _A = topk_scores.flatten() _A = topk_indices.flatten() + shift _A = next_scores_flat.at[topk_indices_flat].set(a__ ) _A = next_scores_flat.reshape(a__ , a__ ) return next_scores class snake_case ( _UpperCamelCase): def __init__( self : Any , a__ : int ) -> Any: '''simple docstring''' _A = bos_token_id def __call__( self : str , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A = jnp.full(scores.shape , -float("inf" ) ) _A = 1 - jnp.bool_(cur_len - 1 ) _A = jnp.where(a__ , new_scores.at[:, self.bos_token_id].set(0 ) , a__ ) return scores class snake_case ( _UpperCamelCase): def __init__( self : int , a__ : int , a__ : int ) -> Dict: '''simple docstring''' _A = max_length _A = eos_token_id def __call__( self : Any , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A = jnp.full(scores.shape , -float("inf" ) ) _A = 1 - jnp.bool_(cur_len - self.max_length + 1 ) _A = jnp.where(a__ , new_scores.at[:, self.eos_token_id].set(0 ) , a__ ) return scores class snake_case ( _UpperCamelCase): def __init__( self : Any , a__ : int , a__ : int ) -> Union[str, Any]: '''simple docstring''' if not isinstance(a__ , a__ ) or min_length < 0: raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" ) if not isinstance(a__ , a__ ) or eos_token_id < 0: raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" ) _A = min_length _A = eos_token_id def __call__( self : int , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) _A = jnp.where(a__ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , a__ ) return scores class snake_case ( _UpperCamelCase): def __init__( self : Any , a__ : int , a__ : str ) -> List[Any]: '''simple docstring''' _A = list(a__ ) _A = begin_index def __call__( self : List[Any] , a__ : Tuple , a__ : Tuple , a__ : int ) -> Dict: '''simple docstring''' _A = 1 - jnp.bool_(cur_len - self.begin_index ) _A = jnp.where(a__ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , a__ ) return scores class snake_case ( _UpperCamelCase): def __init__( self : List[str] , a__ : list ) -> Union[str, Any]: '''simple docstring''' _A = list(a__ ) def __call__( self : List[Any] , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' _A = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class snake_case ( _UpperCamelCase): def __init__( self : int , a__ : Optional[int] ) -> 
Optional[int]: '''simple docstring''' _A = dict(a__ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. _A = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: _A = force_token_array.at[index].set(a__ ) _A = jnp.intaa(a__ ) def __call__( self : Union[str, Any] , a__ : jnp.ndarray , a__ : jnp.ndarray , a__ : int ) -> jnp.ndarray: '''simple docstring''' def _force_token(a__ : Tuple ): _A = scores.shape[0] _A = self.force_token_array[generation_idx] _A = jnp.ones_like(a__ , dtype=scores.dtype ) * -float("inf" ) _A = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) _A = lax.dynamic_update_slice(a__ , a__ , (0, current_token) ) return new_scores _A = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(a__ ) , lambda: scores , ) , ) return scores class snake_case ( _UpperCamelCase): def __init__( self : int , a__ : Any , a__ : Dict , a__ : List[str] ) -> str: '''simple docstring''' _A = generate_config.eos_token_id _A = generate_config.no_timestamps_token_id _A = generate_config.no_timestamps_token_id + 1 _A = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(a__ , "max_initial_timestamp_index" ): _A = generate_config.max_initial_timestamp_index else: _A = model_config.vocab_size if self.max_initial_timestamp_index is None: _A = model_config.vocab_size def __call__( self : Dict , a__ : Any , a__ : int , a__ : List[str] ) -> Dict: '''simple docstring''' _A = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(a__ : Optional[Any] , a__ : Dict ): _A = jnp.where((cur_len - self.begin_index) >= 1 , a__ , a__ ) _A = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , a__ , ) _A = jnp.where((cur_len - self.begin_index) < 2 , a__ , a__ ) _A = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , a__ , a__ , ) return jnp.where( a__ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , a__ , ) _A = jax.vmap(a__ )(a__ , a__ ) _A = jnp.where(cur_len == self.begin_index , a__ , a__ ) _A = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , a__ , ) _A = self.timestamp_begin + self.max_initial_timestamp_index _A = jnp.where( a__ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , a__ , ) # if sum of probability over timestamps is above any other token, sample timestamp _A = jax.nn.log_softmax(a__ , axis=-1 ) def handle_cumulative_probs(a__ : int , a__ : Tuple ): _A = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) _A = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , a__ , ) _A = jax.vmap(a__ )(a__ , a__ ) return scores
621
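To make the nucleus (top-p) masking rule above concrete, a minimal 1-D numpy sketch of the same idea (my own code, not the Flax implementation; it assumes a single unbatched score row):

import numpy as np

def top_p_filter(logits: np.ndarray, top_p: float = 0.9, filter_value: float = -np.inf) -> np.ndarray:
    # sort token scores from most to least likely
    order = np.argsort(logits)[::-1]
    # numerically stable softmax over the sorted scores
    probs = np.exp(logits[order] - np.max(logits))
    probs = probs / probs.sum()
    # keep the shortest prefix whose cumulative mass stays below top_p ...
    keep = np.cumsum(probs) < top_p
    # ... shifted by one so the token that crosses the boundary is kept too,
    # and the single most likely token is never dropped
    keep = np.roll(keep, 1)
    keep[0] = True
    filtered = np.full_like(logits, filter_value)
    filtered[order[keep]] = logits[order[keep]]
    return filtered

scores = np.log(np.array([0.5, 0.3, 0.1, 0.1]))
print(top_p_filter(scores, top_p=0.8))  # only the 0.5 and 0.3 tokens survive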
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None: '''simple docstring''' warnings.warn( "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use GLPNImageProcessor instead." , a__ , ) super().__init__(*a__ , **a__ )
621
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def a__ ( __lowercase , __lowercase=False , __lowercase=False ) -> str: _A = "backbone." if is_semantic else "" _A = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ (f"""{prefix}cls_token""", "beit.embeddings.cls_token"), (f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"), (f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"), (f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("mask_token", "beit.embeddings.mask_token"), ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) else: # layernorm + classification head rename_keys.extend( [ ("fc_norm.weight", "beit.pooler.layernorm.weight"), ("fc_norm.bias", "beit.pooler.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def a__ ( __lowercase , __lowercase , __lowercase=False , __lowercase=False ) -> Optional[int]: for i in range(config.num_hidden_layers ): _A = "backbone." 
if is_semantic else "" # queries, keys and values _A = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" ) _A = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" ) _A = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" ) _A = in_proj_weight[ : config.hidden_size, : ] _A = q_bias _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained _A = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" ) _A = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" ) _A = gamma_a _A = gamma_a def a__ ( __lowercase , __lowercase , __lowercase ) -> Tuple: _A = dct.pop(__lowercase ) _A = val def a__ ( ) -> List[str]: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def a__ ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: _A = False if "rvlcdip" in checkpoint_url else True _A = BeitConfig(use_absolute_position_embeddings=__lowercase , use_mask_token=__lowercase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: _A = 1024 _A = 4096 _A = 24 _A = 16 # labels if "rvlcdip" in checkpoint_url: _A = 16 _A = "huggingface/label-files" _A = "rvlcdip-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys _A = torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" )["model"] _A = create_rename_keys(__lowercase , has_lm_head=__lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , has_lm_head=__lowercase ) # load HuggingFace model _A = BeitForMaskedImageModeling(__lowercase ) if has_lm_head else BeitForImageClassification(__lowercase ) model.eval() model.load_state_dict(__lowercase ) # Check outputs on an image _A = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__lowercase ) _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ) _A = encoding["pixel_values"] _A = model(__lowercase ) _A = outputs.logits # verify logits _A = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(__lowercase ), "Shape of logits not as expected" Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowercase ) if push_to_hub: if has_lm_head: _A = "dit-base" if "base" in checkpoint_url else "dit-large" else: _A = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip" image_processor.push_to_hub( repo_path_or_name=Path(__lowercase , __lowercase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__lowercase , ) model.push_to_hub( repo_path_or_name=Path(__lowercase , __lowercase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__lowercase , ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", 
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) a_ = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
621
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def a__ ( __lowercase ) -> Optional[int]: _A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def a__ ( __lowercase ) -> List[Any]: _A , _A = emb.weight.shape _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _A = emb.weight.data return lin_layer def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]: _A = torch.load(__lowercase , map_location="cpu" )["model"] remove_ignore_keys_(__lowercase ) _A = state_dict["encoder.embed_tokens.weight"].shape[0] _A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: _A = "relu" _A = state_dict["decoder.embed_tokens.weight"] _A = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: _A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") a_ = parser.parse_args() a_ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
621
1
"""simple docstring""" def a__ ( __lowercase ) -> list: if any(not isinstance(__lowercase , __lowercase ) or x < 0 for x in sequence ): raise TypeError("Sequence must be list of non-negative integers" ) for _ in range(len(__lowercase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__lowercase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
621
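A small randomized sanity check for bead_sort above, covering both the sorted result and the documented TypeError for negative inputs (this check is mine, not part of the dataset row):

import random

values = [random.randint(0, 100) for _ in range(50)]
assert bead_sort(list(values)) == sorted(values)  # bead sort mutates, so pass a copy
try:
    bead_sort([3, -1, 2])                         # negatives are rejected
except TypeError:
    pass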
"""simple docstring""" import numpy as np def a__ ( __lowercase , __lowercase ) -> np.ndarray: return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
621
1
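A short worked example for exponential_linear_unit above (outputs rounded; the usage snippet is mine, not part of the dataset row):

import numpy as np

print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))
# -> [ 2.3     0.6    -0.2594 -0.2933]   (alpha * (exp(x) - 1) for the negatives)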
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["ConditionalDetrFeatureExtractor"] a_ = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging a_ = logging.get_logger(__name__) a_ = {"vocab_file": "spiece.model"} a_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 a_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } a_ = "▁" class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None: '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: _A = [F"""<extra_id_{i}>""" for i in range(a__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" ) _A = legacy _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , ) _A = vocab_file _A = extra_ids _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a__ ) @staticmethod def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple: '''simple docstring''' if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , ) return max_model_length @property def a_ ( self : List[Any] ) -> Dict: '''simple docstring''' return self.sp_model.get_piece_size() + self._extra_ids def a_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(a__ )) + [1] return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1] def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' return list( set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) ) def a_ ( self : str ) -> List[Any]: '''simple docstring''' return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()] def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]: '''simple docstring''' if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated""" " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = self._add_eos_if_not_present(a__ ) if token_ids_a is None: return token_ids_a else: _A = self._add_eos_if_not_present(a__ ) return token_ids_a + token_ids_a def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _A = self.__dict__.copy() _A = None return state def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _A = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]: '''simple docstring''' if not self.legacy: _A = SPIECE_UNDERLINE + text.replace(a__ , " " ) return super().tokenize(a__ , **a__ ) def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any: '''simple docstring''' if not self.legacy: _A = text.startswith(a__ ) if is_first: _A = text[1:] _A = self.sp_model.encode(a__ , out_type=a__ ) if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ): _A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def a_ ( self : int , a__ : List[Any] ) -> List[str]: '''simple docstring''' if token.startswith("<extra_id_" ): _A = re.match(r"<extra_id_(\d+)>" , a__ ) _A = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(a__ ) def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any: '''simple docstring''' if index < self.sp_model.get_piece_size(): _A = self.sp_model.IdToPiece(a__ ) else: _A = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]: '''simple docstring''' _A = [] _A = "" _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a__ ) + token _A = True _A = [] else: current_sub_tokens.append(a__ ) _A = False out_string += self.sp_model.decode(a__ ) return out_string.strip() def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(a__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a__ ) elif not os.path.isfile(self.vocab_file ): with open(a__ , "wb" ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(a__ ) return (out_vocab_file,)
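# Hedged usage sketch for the tokenizer above (checkpoint name illustrative;
# the methods are renamed `a_` in this dump, upstream names are used in the
# comments below): </s> is appended on encoding, and the <extra_id_N>
# sentinels occupy the top of the vocabulary.
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
ids = tok("translate English to German: hello").input_ids
assert ids[-1] == tok.eos_token_id  # EOS appended by _add_eos_if_not_present
print(tok.convert_tokens_to_ids("<extra_id_0>"))  # vocab_size - 1, per _convert_token_to_id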
621
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def a__ ( __lowercase ) -> List[Any]: _A = os.path.join(args.tf_model_dir , "parameters.json" ) _A = json.loads(open(__lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith(".pt" ): _A = args.output + ".pt" _A = OrderedDict() with tf.device("/CPU:0" ): _A = tf.train.load_checkpoint(args.tf_model_dir ) _A = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _A = reader.get_tensor(__lowercase ).astype(np.floataa ) if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ): continue if key_name.startswith("pasts/" ): if key_name.startswith("pasts/mlp" ): _A = int(key_name[9] ) elif key_name.startswith("pasts/out" ): _A = 8 _A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.startswith("model/moe" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/switch_gating/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/softmlp/kernel" ): _A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ): _A = key_name[-9:-7] for i in range(16 ): _A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) _A = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _A = torch.tensor(__lowercase ) elif key_name.startswith("model/mlp" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/p1/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/p1/bias" ): _A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/p2/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/p2/bias" ): _A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.startswith("model/ln" ): _A = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _A = "model.blocks.%d.feed_forward.norm.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/g" ): _A = "model.blocks.%d.feed_forward.norm.weight" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.startswith("model/att" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/qkv/kernel" ): _A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _A = state[:, 0, :, :] _A = state[:, 1, :, :] _A = state[:, 2, :, :] _A = ( 
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player _A = torch.tensor(__lowercase ) _A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player _A = torch.tensor(__lowercase ) _A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player _A = torch.tensor(__lowercase ) elif key_name.endswith("/o/kernel" ): _A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player _A = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.startswith("model/an" ): _A = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _A = "model.blocks.%d.self_attn.norm.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/g" ): _A = "model.blocks.%d.self_attn.norm.weight" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif ( key_name.startswith("model/wte" ) or key_name.startswith("model/wpe" ) or key_name.startswith("model/ete" ) ): _A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] _A = "model.%s.weight" % nlayer _A = vnp.copy() # same in embedded _A = torch.tensor(__lowercase ) if key_name.startswith("model/wte" ): _A = "lm_head.weight" _A = vnp.copy() # same in embedded _A = torch.tensor(__lowercase ) elif key_name.startswith("model/wob" ): _A = "final_logits_bias" _A = vnp.copy() # same in embedded _A = state.reshape((1, -1) ) _A = torch.tensor(__lowercase ) elif key_name == "model/dense/kernel": _A = "model.last_project.weight" _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name == "model/dense_1/bias": _A = "model.last_project.bias" _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) torch.save(__lowercase , args.output ) if __name__ == "__main__": a_ = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") a_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
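# Hypothetical direct call mirroring the argparse wiring above. In this dump
# the converter def is renamed `a__`, while the __main__ block invokes it as
# convert_tf_gptsan_to_pt, presumably its original name. Paths are placeholders
# and a real TF checkpoint directory is required.
import argparse

args = argparse.Namespace(tf_model_dir="/path/to/gptsan_tf_checkpoint", output="gptsan_pytorch_model.pt")
convert_tf_gptsan_to_pt(args)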
621
1
"""simple docstring""" class snake_case : def __init__( self : Optional[int] ) -> Tuple: '''simple docstring''' _A = 0 _A = 0 _A = {} def a_ ( self : List[str] , a__ : List[Any] ) -> Any: '''simple docstring''' if vertex not in self.adjacency: _A = {} self.num_vertices += 1 def a_ ( self : Union[str, Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : Dict ) -> Optional[int]: '''simple docstring''' self.add_vertex(a__ ) self.add_vertex(a__ ) if head == tail: return _A = weight _A = weight def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _A = self.get_edges() for edge in edges: _A , _A , _A = edge edges.remove((tail, head, weight) ) for i in range(len(a__ ) ): _A = list(edges[i] ) edges.sort(key=lambda a__ : e[2] ) for i in range(len(a__ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _A = edges[i][2] + 1 for edge in edges: _A , _A , _A = edge _A = weight _A = weight def __str__( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = "" for tail in self.adjacency: for head in self.adjacency[tail]: _A = self.adjacency[head][tail] string += F"""{head} -> {tail} == {weight}\n""" return string.rstrip("\n" ) def a_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _A = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' return self.adjacency.keys() @staticmethod def a_ ( a__ : List[Any]=None , a__ : List[str]=None ) -> List[str]: '''simple docstring''' _A = Graph() if vertices is None: _A = [] if edges is None: _A = [] for vertex in vertices: g.add_vertex(a__ ) for edge in edges: g.add_edge(*a__ ) return g class snake_case : def __init__( self : Any ) -> Optional[int]: '''simple docstring''' _A = {} _A = {} def __len__( self : List[str] ) -> Dict: '''simple docstring''' return len(self.parent ) def a_ ( self : List[Any] , a__ : Any ) -> List[Any]: '''simple docstring''' if item in self.parent: return self.find(a__ ) _A = item _A = 0 return item def a_ ( self : str , a__ : List[str] ) -> str: '''simple docstring''' if item not in self.parent: return self.make_set(a__ ) if item != self.parent[item]: _A = self.find(self.parent[item] ) return self.parent[item] def a_ ( self : str , a__ : Optional[int] , a__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = self.find(a__ ) _A = self.find(a__ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _A = roota return roota if self.rank[roota] < self.rank[roota]: _A = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _A = roota return roota return None @staticmethod def a_ ( a__ : Dict ) -> Union[str, Any]: '''simple docstring''' _A = graph.num_vertices _A = Graph.UnionFind() _A = [] while num_components > 1: _A = {} for vertex in graph.get_vertices(): _A = -1 _A = graph.get_edges() for edge in edges: _A , _A , _A = edge edges.remove((tail, head, weight) ) for edge in edges: _A , _A , _A = edge _A = union_find.find(a__ ) _A = union_find.find(a__ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _A = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _A = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _A , _A , _A = cheap_edge[vertex] if union_find.find(a__ ) != union_find.find(a__ ): union_find.union(a__ , a__ ) mst_edges.append(cheap_edge[vertex] ) _A = num_components - 1 _A = Graph.build(edges=a__ ) return mst
621
"""simple docstring""" import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a_ = argparse.ArgumentParser( description=( "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") a_ = parser.parse_args() if args.model_type == "roberta": a_ = RobertaForMaskedLM.from_pretrained(args.model_name) a_ = "roberta" elif args.model_type == "gpt2": a_ = GPTaLMHeadModel.from_pretrained(args.model_name) a_ = "transformer" a_ = model.state_dict() a_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a_ = f'''{prefix}.embeddings.{w}.weight''' a_ = state_dict[param_name] for w in ["weight", "bias"]: a_ = f'''{prefix}.embeddings.LayerNorm.{w}''' a_ = state_dict[param_name] # Transformer Blocks # a_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: a_ = state_dict[f'''lm_head.dense.{w}'''] a_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a_ = state_dict[f'''{prefix}.ln_f.{w}'''] a_ = state_dict["lm_head.weight"] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
621
1
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class snake_case : def __init__( self : Optional[int] , a__ : Optional[int] , a__ : Optional[int]=13 , a__ : Any=10 , a__ : Any=3 , a__ : str=2 , a__ : Dict=2 , a__ : int=True , a__ : Any=True , a__ : List[str]=32 , a__ : str=5 , a__ : int=4 , a__ : int=37 , a__ : Union[str, Any]="gelu" , a__ : Tuple=0.1 , a__ : Optional[int]=0.1 , a__ : List[str]=10 , a__ : List[str]=0.0_2 , a__ : str="divided_space_time" , a__ : Tuple=None , ) -> List[Any]: '''simple docstring''' _A = parent _A = batch_size _A = image_size _A = num_channels _A = patch_size _A = num_frames _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = attention_type _A = initializer_range _A = scope _A = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token _A = (image_size // patch_size) ** 2 _A = (num_frames) * self.num_patches_per_frame + 1 def a_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _A = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels def a_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _A = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) _A = self.num_labels return config def a_ ( self : Dict , a__ : Tuple , a__ : str , a__ : Dict ) -> Optional[int]: '''simple docstring''' _A = TimesformerModel(config=a__ ) model.to(a__ ) model.eval() _A = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self : Optional[int] , a__ : str , a__ : Tuple , a__ : Optional[int] ) -> Any: '''simple docstring''' _A = TimesformerForVideoClassification(a__ ) model.to(a__ ) model.eval() _A = model(a__ ) # verify the logits shape _A = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , 
a__ ) def a_ ( self : Dict ) -> Dict: '''simple docstring''' _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __UpperCamelCase = ( {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def a_ ( self : str ) -> str: '''simple docstring''' _A = TimesformerModelTester(self ) _A = ConfigTester( self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a_ ( self : Dict , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Optional[int]=False ) -> Dict: '''simple docstring''' _A = copy.deepcopy(a__ ) if return_labels: if model_class in get_values(a__ ): _A = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a__ ) return inputs_dict def a_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def a_ ( self : Tuple ) -> int: '''simple docstring''' pass def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(a__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a__ , nn.Linear ) ) def a_ ( self : List[str] ) -> str: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(a__ ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , a__ ) def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*a__ ) @slow def a_ ( self : Dict ) -> List[str]: '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TimesformerModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def a_ ( self : Optional[int] ) -> Dict: '''simple docstring''' if not self.has_attentions: pass else: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = True for model_class in self.all_model_classes: _A = self.model_tester.seq_length _A = self.model_tester.num_frames _A = True _A = False _A = True _A = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(a__ , a__ ) ) _A = outputs.attentions self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _A = True _A = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(a__ , a__ ) ) _A = outputs.attentions self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) # attentions has shape 
(batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) _A = len(a__ ) # Check attention is always last and order is fine _A = True _A = True _A = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(a__ , a__ ) ) self.assertEqual(out_len + 1 , len(a__ ) ) _A = outputs.attentions self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def a_ ( self : Any ) -> Optional[Any]: '''simple docstring''' def check_hidden_states_output(a__ : List[str] , a__ : Dict , a__ : Union[str, Any] ): _A = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(a__ , a__ ) ) _A = outputs.hidden_states _A = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a__ ) , a__ ) _A = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(a__ , a__ , a__ ) def a__ ( ) -> Tuple: _A = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) _A = np.load(__lowercase ) return list(__lowercase ) @require_torch @require_vision class snake_case ( unittest.TestCase): @cached_property def a_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def a_ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _A = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( a__ ) _A = self.default_image_processor _A = prepare_video() _A = image_processor(video[:8] , return_tensors="pt" ).to(a__ ) # forward pass with torch.no_grad(): _A = model(**a__ ) # verify the logits _A = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , a__ ) _A = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
621
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
1
"""simple docstring""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.12.2"): raise Exception("requires fairseq >= 0.12.2") if version.parse(fairseq.__version__) > version.parse("2"): raise Exception("requires fairseq < v2") logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = "Hello, World!" a_ = "en_XX" def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = Path("data_bin" ) _A = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(__lowercase ).parent ) , checkpoint_file=Path(__lowercase ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(__lowercase ) , bpe="sentencepiece" , sentencepiece_model=str(Path(__lowercase ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(__lowercase ) _A = xmod.model.encoder.sentence_encoder _A = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: _A = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , __lowercase ) _A = XmodForSequenceClassification(__lowercase ) if classification_head else XmodForMaskedLM(__lowercase ) model.eval() # Now let's copy all the weights. # Embeddings _A = xmod_sent_encoder.embed_tokens.weight _A = xmod_sent_encoder.embed_positions.weight _A = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. _A = xmod_sent_encoder.layernorm_embedding.weight _A = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _A = model.roberta.encoder.layer[i] _A = xmod_sent_encoder.layers[i] # self attention _A = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." ) _A = xmod_layer.self_attn.q_proj.weight _A = xmod_layer.self_attn.q_proj.bias _A = xmod_layer.self_attn.k_proj.weight _A = xmod_layer.self_attn.k_proj.bias _A = xmod_layer.self_attn.v_proj.weight _A = xmod_layer.self_attn.v_proj.bias # self-attention output _A = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." 
) _A = xmod_layer.self_attn.out_proj.weight _A = xmod_layer.self_attn.out_proj.bias _A = xmod_layer.self_attn_layer_norm.weight _A = xmod_layer.self_attn_layer_norm.bias # intermediate _A = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) _A = xmod_layer.fca.weight _A = xmod_layer.fca.bias # output _A = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) _A = xmod_layer.fca.weight _A = xmod_layer.fca.bias _A = xmod_layer.final_layer_norm.weight _A = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: _A = xmod_layer.adapter_layer_norm.weight _A = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." ) for lang_code, adapter in xmod_layer.adapter_modules.items(): _A = bert_output.adapter_modules[lang_code] _A = xmod_layer.adapter_modules[lang_code] _A = from_adapter.fca.weight _A = from_adapter.fca.bias _A = from_adapter.fca.weight _A = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: _A = xmod_sent_encoder.layer_norm.weight _A = xmod_sent_encoder.layer_norm.bias if classification_head: _A = xmod.model.classification_heads["mnli"].dense.weight _A = xmod.model.classification_heads["mnli"].dense.bias _A = xmod.model.classification_heads["mnli"].out_proj.weight _A = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head _A = xmod.model.encoder.lm_head.dense.weight _A = xmod.model.encoder.lm_head.dense.bias _A = xmod.model.encoder.lm_head.layer_norm.weight _A = xmod.model.encoder.lm_head.layer_norm.bias _A = xmod.model.encoder.lm_head.weight _A = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. _A = xmod.encode(__lowercase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(__lowercase ) _A = model(__lowercase )[0] if classification_head: _A = xmod.model.classification_heads["mnli"](xmod.extract_features(__lowercase ) ) else: _A = xmod.model(__lowercase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) _A = torch.max(torch.abs(our_output - their_output ) ).item() print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 _A = torch.allclose(__lowercase , __lowercase , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) a_ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
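# Hypothetical direct call mirroring the argparse wiring above. In this dump
# the function def is renamed `a__`, while the __main__ block invokes it as
# convert_xmod_checkpoint_to_pytorch, presumably its original name. Paths are
# placeholders and a real fairseq checkpoint is required.
convert_xmod_checkpoint_to_pytorch(
    "/path/to/xmod/model.pt",  # xmod_checkpoint_path (placeholder)
    "./xmod-base-converted",   # pytorch_dump_folder_path (placeholder)
    False,                     # classification_head
)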
621
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class snake_case ( _UpperCamelCase): def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int: '''simple docstring''' _A = p_stop _A = max_length def __iter__( self : Any ) -> Optional[Any]: '''simple docstring''' _A = 0 _A = False while not stop and count < self.max_length: yield count count += 1 _A = random.random() < self.p_stop class snake_case ( unittest.TestCase): def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]: '''simple docstring''' _A = [ BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ ) for i in range(2 ) ] _A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] ) self.assertListEqual(a__ , a__ ) def a_ ( self : List[Any] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ ) def a_ ( self : int ) -> int: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) def a_ ( self : List[str] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' _A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str: '''simple docstring''' random.seed(a__ ) _A = list(a__ ) _A = [ IterableDatasetShard( a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , ) for i in range(a__ ) ] _A = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(a__ ) iterable_dataset_lists.append(list(a__ ) ) _A = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _A = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(a__ ) , len(a__ ) ) self.assertTrue(len(a__ ) % shard_batch_size == 0 ) _A = [] for idx in range(0 , len(a__ ) , a__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(a__ ) < len(a__ ): reference += reference self.assertListEqual(a__ , reference[: len(a__ )] ) def a_ ( self : List[str] ) -> List[Any]: '''simple docstring''' _A = 42 _A = RandomIterableDataset() self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) # Edge case with a very small dataset _A = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> Dict: '''simple docstring''' _A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ ) _A = SkipBatchSampler(a__ , 2 ) self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Optional[int]: '''simple docstring''' _A = DataLoader(list(range(16 ) ) , batch_size=4 ) _A = skip_first_batches(a__ , 
num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _A = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def a_ ( self : int ) -> int: '''simple docstring''' Accelerator() _A = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
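# Illustrative use of BatchSamplerShard, mirroring the first expectation
# tested above: two processes share a 24-sample, batch-size-3 sampler, each
# seeing every other batch.
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
print(list(shards[0])[:2])  # [[0, 1, 2], [6, 7, 8]]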
621
1
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def a__ ( __lowercase ) -> Optional[int]: if not is_accelerate_available(): return method _A = version.parse(accelerate.__version__ ).base_version if version.parse(__lowercase ) < version.parse("0.17.0" ): return method def wrapper(self , *__lowercase , **__lowercase ): if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ): self._hf_hook.pre_forward(self ) return method(self , *__lowercase , **__lowercase ) return wrapper
621
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device a_ = False class snake_case ( unittest.TestCase): pass @nightly @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : Optional[int] ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a__ ) _A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = generator.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = "cyberpunk 2077" _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = "A painting of a squirrel eating a burger " _A = torch.manual_seed(0 ) _A = pipe.text_to_image( prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
621
1
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class snake_case : def __init__( self : Union[str, Any] , a__ : List[str] , a__ : int , a__ : int ) -> List[str]: '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError("Destination width/height should be > 0" ) _A = img _A = img.shape[1] _A = img.shape[0] _A = dst_width _A = dst_height _A = self.src_w / self.dst_w _A = self.src_h / self.dst_h _A = _A = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55 ) def a_ ( self : Dict ) -> Dict: '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): _A = self.img[self.get_y(a__ )][self.get_x(a__ )] def a_ ( self : Tuple , a__ : int ) -> int: '''simple docstring''' return int(self.ratio_x * x ) def a_ ( self : Any , a__ : int ) -> int: '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": a_ , a_ = 8_00, 6_00 a_ = imread("image_data/lena.jpg", 1) a_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
621
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a_ = logging.get_logger(__name__) @dataclass class snake_case : __UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())}) __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'}) def a_ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _A = self.task_name.lower() class snake_case ( _UpperCamelCase): __UpperCamelCase = 'train' __UpperCamelCase = 'dev' __UpperCamelCase = 'test' class snake_case ( _UpperCamelCase): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple: '''simple docstring''' warnings.warn( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , ) _A = args _A = glue_processors[args.task_name]() _A = glue_output_modes[args.task_name] if isinstance(a__ , a__ ): try: _A = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) # Load data features from cache or dataset file _A = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) _A = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) _A , _A = label_list[2], label_list[1] _A = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_A = cached_features_file + ".lock" with FileLock(a__ ): if os.path.exists(a__ ) and not args.overwrite_cache: _A = time.time() _A = torch.load(a__ ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: _A = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: _A = self.processor.get_test_examples(args.data_dir ) else: _A = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: _A = examples[:limit_length] _A = glue_convert_examples_to_features( a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , ) _A = time.time() torch.save(self.features , a__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : List[Any] ) -> Any: '''simple docstring''' return len(self.features ) def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures: '''simple docstring''' return self.features[a__] def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' return self.label_list
621
1
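The first `code` field in the row above implements nearest-neighbour image scaling, but the dataset's identifier mangling (class names become `snake_case`, methods `a_`, locals `_A`) leaves it unrunnable as printed. A minimal de-mangled sketch of the same technique, using illustrative names and assuming an H x W x C numpy array as input:

import numpy as np

def nearest_neighbour_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    """Scale an H x W x C image to dst_h x dst_w by nearest-neighbour sampling."""
    if dst_w <= 0 or dst_h <= 0:
        raise ValueError("Destination width/height should be > 0")
    src_h, src_w = img.shape[:2]
    ratio_x = src_w / dst_w  # source columns per destination column
    ratio_y = src_h / dst_h  # source rows per destination row
    out = np.empty((dst_h, dst_w) + img.shape[2:], dtype=img.dtype)
    for i in range(dst_h):
        for j in range(dst_w):
            # map each destination pixel back to its nearest source pixel
            out[i, j] = img[int(ratio_y * i), int(ratio_x * j)]
    return out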
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"], "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["BertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", "BertForPreTraining", "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", "load_tf_weights_in_bert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", "TFBertForPreTraining", "TFBertForQuestionAnswering", "TFBertForSequenceClassification", "TFBertForTokenClassification", "TFBertLMHeadModel", "TFBertMainLayer", "TFBertModel", "TFBertPreTrainedModel", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["TFBertTokenizer"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FlaxBertForCausalLM", "FlaxBertForMaskedLM", "FlaxBertForMultipleChoice", "FlaxBertForNextSentencePrediction", "FlaxBertForPreTraining", "FlaxBertForQuestionAnswering", "FlaxBertForSequenceClassification", "FlaxBertForTokenClassification", "FlaxBertModel", "FlaxBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import 
TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str: # Return True if there is node that has not iterated. _A = [False] * len(__lowercase ) _A = [] queue.append(__lowercase ) _A = True while queue: _A = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowercase ) _A = True _A = u return visited[t] def a__ ( __lowercase , __lowercase , __lowercase ) -> int: # This array is filled by BFS and to store path _A = [-1] * (len(__lowercase )) _A = 0 while bfs(__lowercase , __lowercase , __lowercase , __lowercase ): _A = float("Inf" ) _A = sink while s != source: # Find the minimum value in select path _A = min(__lowercase , graph[parent[s]][s] ) _A = parent[s] max_flow += path_flow _A = sink while v != source: _A = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _A = parent[v] return max_flow a_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] a_ , a_ = 0, 5 print(ford_fulkerson(graph, source, sink))
621
1
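The max-flow snippet in the row above (its `style_context` field) is a BFS-based Ford-Fulkerson, i.e. the Edmonds-Karp variant. A runnable sketch with the mangled `_A` locals restored; applied to the 6-node capacity matrix shown in that field, it returns 23:

from collections import deque

def bfs(graph, source, sink, parent):
    """Search the residual graph for an augmenting path, recording it in `parent`."""
    visited = [False] * len(graph)
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for v, capacity in enumerate(graph[u]):
            if not visited[v] and capacity > 0:
                queue.append(v)
                visited[v] = True
                parent[v] = u
    return visited[sink]

def ford_fulkerson(graph, source, sink):
    """Total max flow: repeatedly push the bottleneck of each augmenting path."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("inf")
        s = sink
        while s != source:  # bottleneck capacity along the BFS path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:  # update residual capacities along the path
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow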
"""simple docstring""" def a__ ( __lowercase ) -> tuple[int, int]: try: _A = float(__lowercase ) except ValueError: raise ValueError("Please enter a valid number" ) _A = decimal - int(__lowercase ) if fractional_part == 0: return int(__lowercase ), 1 else: _A = len(str(__lowercase ).split("." )[1] ) _A = int(decimal * (10**number_of_frac_digits) ) _A = 10**number_of_frac_digits _A , _A = denominator, numerator while True: _A = dividend % divisor if remainder == 0: break _A , _A = divisor, remainder _A , _A = numerator / divisor, denominator / divisor return int(__lowercase ), int(__lowercase ) if __name__ == "__main__": print(f'''{decimal_to_fraction(2) = }''') print(f'''{decimal_to_fraction(89.0) = }''') print(f'''{decimal_to_fraction("67") = }''') print(f'''{decimal_to_fraction("45.0") = }''') print(f'''{decimal_to_fraction(1.5) = }''') print(f'''{decimal_to_fraction("6.25") = }''') print(f'''{decimal_to_fraction("78td") = }''')
621
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) a_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", 
"decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]: _A = state_dict.pop(__lowercase ) _A = val def a__ ( __lowercase ) -> List[str]: _A = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _A = value else: _A = value return new_state_dict def a__ ( __lowercase , __lowercase=False ) -> Any: _A = "" if is_panoptic: _A = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:256, :] _A = in_proj_bias[:256] _A = in_proj_weight[256:512, :] _A = in_proj_bias[256:512] _A = in_proj_weight[-256:, :] _A = in_proj_bias[-256:] def a__ ( ) -> int: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def a__ ( __lowercase , __lowercase ) -> Any: _A = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _A = "resnet101" if "dc5" in model_name: _A = True _A = "panoptic" in model_name if is_panoptic: _A = 250 else: _A = 91 _A = "huggingface/label-files" _A = "coco-detection-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load image processor _A = "coco_panoptic" if is_panoptic else "coco_detection" _A = ConditionalDetrImageProcessor(format=__lowercase ) # prepare image _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ) _A = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub _A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval() _A = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _A = "conditional_detr." + src rename_key(__lowercase , __lowercase , __lowercase ) _A = rename_backbone_keys(__lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowercase , is_panoptic=__lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _A = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): _A = state_dict.pop(__lowercase ) _A = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _A = state_dict.pop(__lowercase ) _A = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: _A = state_dict.pop(__lowercase ) _A = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _A = state_dict.pop(__lowercase ) _A = val # finally, create HuggingFace model and load state dict _A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase ) model.load_state_dict(__lowercase ) model.eval() model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion _A = conditional_detr(__lowercase ) _A = model(__lowercase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) a_ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
621
1
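The decimal-to-fraction snippet in the row above scales the input by a power of ten, then reduces with Euclid's GCD; since the `_A` mangling hides which name holds which value, here is a spelled-out sketch:

def decimal_to_fraction(decimal):
    """Return (numerator, denominator) for a decimal given as float or string."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number") from None
    if decimal - int(decimal) == 0:
        return int(decimal), 1
    # scale by 10**d so both parts become integers
    n_digits = len(str(decimal).split(".")[1])
    numerator = int(decimal * 10**n_digits)
    denominator = 10**n_digits
    a, b = numerator, denominator
    while b:  # Euclid's algorithm for the greatest common divisor
        a, b = b, a % b
    return numerator // a, denominator // a

For example, decimal_to_fraction(6.25) returns (25, 4) and decimal_to_fraction("78td") raises ValueError, matching the demo prints in that field.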
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : def __init__( self : Union[str, Any] , a__ : List[str] , a__ : Optional[Any]=13 , a__ : List[Any]=30 , a__ : List[str]=2 , a__ : Optional[int]=3 , a__ : Any=True , a__ : Optional[int]=True , a__ : Optional[Any]=32 , a__ : List[Any]=2 , a__ : Tuple=4 , a__ : Dict=37 , a__ : Any="gelu" , a__ : List[str]=0.1 , a__ : Optional[Any]=0.1 , a__ : List[Any]=10 , a__ : List[str]=0.0_2 , a__ : int=3 , a__ : Optional[Any]=None , a__ : str=2 , ) -> int: '''simple docstring''' _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = scope _A = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _A = (image_size // patch_size) ** 2 _A = num_patches + 2 def a_ ( self : str ) -> str: '''simple docstring''' _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def a_ ( self : Tuple ) -> int: '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def a_ ( self : Tuple , a__ : int , a__ : Union[str, Any] , a__ : int ) -> Any: '''simple docstring''' _A = TFDeiTModel(config=a__ ) _A = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Dict , a__ : Optional[int] ) -> str: '''simple docstring''' _A = TFDeiTForMaskedImageModeling(config=a__ ) _A = model(a__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _A = 1 _A = TFDeiTForMaskedImageModeling(a__ ) _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(a__ ) self.parent.assertEqual(result.reconstruction.shape , 
(self.batch_size, 1, self.image_size, self.image_size) ) def a_ ( self : int , a__ : List[str] , a__ : str , a__ : List[str] ) -> Any: '''simple docstring''' _A = self.type_sequence_label_size _A = TFDeiTForImageClassification(a__ ) _A = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _A = 1 _A = TFDeiTForImageClassification(a__ ) _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a_ ( self : List[Any] ) -> int: '''simple docstring''' _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __UpperCamelCase = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def a_ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _A = TFDeiTModelTester(self ) _A = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a_ ( self : Optional[int] ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def a_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' pass def a_ ( self : Optional[Any] ) -> Any: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(a__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a__ , tf.keras.layers.Dense ) ) def a_ ( self : Dict ) -> List[str]: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(a__ ) _A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , a__ ) def a_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a__ ) def a_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) def a_ ( self : int , a__ : List[Any] , a__ : List[str] , a__ : str=False ) -> str: '''simple docstring''' _A = super()._prepare_for_class(a__ , a__ , return_labels=a__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def a_ ( self : Optional[Any] ) -> Any: '''simple docstring''' for model_name in 
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFDeiTModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def a__ ( ) -> Union[str, Any]: _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase): @cached_property def a_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def a_ ( self : Tuple ) -> List[Any]: '''simple docstring''' _A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=a__ , return_tensors="tf" ) # forward pass _A = model(**a__ ) # verify the logits _A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , a__ ) _A = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
621
"""simple docstring""" import random def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = a[left_index] _A = left_index + 1 for j in range(left_index + 1 , __lowercase ): if a[j] < pivot: _A , _A = a[i], a[j] i += 1 _A , _A = a[i - 1], a[left_index] return i - 1 def a__ ( __lowercase , __lowercase , __lowercase ) -> int: if left < right: _A = random.randint(__lowercase , right - 1 ) _A , _A = ( a[left], a[pivot], ) # switches the pivot with the left most bound _A = partition(__lowercase , __lowercase , __lowercase ) quick_sort_random( __lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point quick_sort_random( __lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point def a__ ( ) -> Dict: _A = input("Enter numbers separated by a comma:\n" ).strip() _A = [int(__lowercase ) for item in user_input.split("," )] quick_sort_random(__lowercase , 0 , len(__lowercase ) ) print(__lowercase ) if __name__ == "__main__": main()
621
1
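The quicksort snippet in the row above picks a uniformly random pivot, swaps it to the left bound, then partitions. A de-mangled, runnable sketch of the same scheme (right bound exclusive, as in that field's main()):

import random

def partition(a, left, right):
    """Partition a[left:right] around a[left]; return the pivot's final index."""
    pivot = a[left]
    i = left + 1
    for j in range(left + 1, right):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left] = a[left], a[i - 1]  # put the pivot between the halves
    return i - 1

def quick_sort_random(a, left, right):
    """In-place quicksort of a[left:right] with a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = a[pivot], a[left]  # move the chosen pivot to the left bound
        p = partition(a, left, right)
        quick_sort_random(a, left, p)       # elements smaller than the pivot
        quick_sort_random(a, p + 1, right)  # elements greater or equal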
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class snake_case ( _UpperCamelCase): def __init__( self : List[Any] , a__ : Any ) -> Any: '''simple docstring''' _A = data def __iter__( self : List[str] ) -> str: '''simple docstring''' for element in self.data: yield element def a__ ( __lowercase=True ) -> Tuple: _A = Accelerator(even_batches=__lowercase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]: if iterable: _A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) ) else: _A = TensorDataset(torch.as_tensor(range(__lowercase ) ) ) _A = DataLoader(__lowercase , batch_size=__lowercase ) _A = accelerator.prepare(__lowercase ) return dl def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict: _A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase ) _A = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def a__ ( ) -> List[str]: _A = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def a__ ( ) -> List[Any]: _A = create_accelerator(even_batches=__lowercase ) verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def a__ ( ) -> int: _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__lowercase ): _A = ddp_model(batch[0].float() ) _A = output.sum() loss.backward() batch_idxs.append(__lowercase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def a__ ( 
__lowercase ) -> List[str]: with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for multi-GPU" in str(w[-1].message ) def a__ ( ) -> Tuple: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = train_dl.batch_sampler.even_batches _A = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> int: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> Optional[Any]: _A = create_accelerator() _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for map-style datasets" in str(w[-1].message ) def a__ ( ) -> Optional[Any]: _A = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) _A = accelerator.state.distributed_type _A = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__lowercase ) _A = original_state if __name__ == "__main__": main()
621
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): __UpperCamelCase = ['input_features'] def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str: '''simple docstring''' super().__init__( feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , ) _A = n_fft _A = hop_length _A = chunk_length _A = chunk_length * sampling_rate _A = self.n_samples // hop_length _A = sampling_rate _A = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : int , a__ : np.array ) -> np.ndarray: '''simple docstring''' _A = spectrogram( a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) _A = log_spec[:, :-1] _A = np.maximum(a__ , log_spec.max() - 8.0 ) _A = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: _A = np.array(a__ , np.intaa ) _A = [] for vector, length in zip(a__ , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(a__ ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) _A = is_batched_numpy or ( isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(a__ , np.ndarray ): _A = np.asarray(a__ , dtype=np.floataa ) elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A = [np.asarray([raw_speech] ).T] _A = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding _A = self.pad( a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _A = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) _A = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format _A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) _A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]] if isinstance(input_features[0] , a__ ): _A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features] else: _A = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _A = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: _A = padded_inputs.convert_to_tensors(a__ ) return padded_inputs def a_ ( self : Dict ) -> Dict[str, Any]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
621
1
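The Whisper-style feature extractor in the row above normalizes each waveform to zero mean and unit variance using only its unpadded length (`zero_mean_unit_var_norm`). A minimal numpy sketch of that step, assuming a 2-D attention mask with 1s over valid samples:

import numpy as np

def zero_mean_unit_var_norm(input_values, attention_mask=None, padding_value=0.0):
    """Normalize each 1-D float array using statistics of its valid prefix."""
    if attention_mask is None:
        return [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
    normed = []
    for vector, length in zip(input_values, attention_mask.sum(-1)):
        out = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
        if length < out.shape[0]:
            out[length:] = padding_value  # padded tail is reset rather than normalized
        normed.append(out)
    return normed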
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] a_ = "3.0.12" a_ = None def a__ ( ) -> List[str]: global _logger _A = _logger or logging.getLogger(__name__ ) return _logger class snake_case ( _UpperCamelCase): def __init__( self : str , a__ : Optional[Any] ) -> int: '''simple docstring''' _A = lock_file return None def __str__( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _A = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class snake_case : def __init__( self : Union[str, Any] , a__ : Tuple ) -> List[str]: '''simple docstring''' _A = lock return None def __enter__( self : Optional[Any] ) -> Tuple: '''simple docstring''' return self.lock def __exit__( self : Tuple , a__ : str , a__ : List[str] , a__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' self.lock.release() return None class snake_case : def __init__( self : int , a__ : Optional[int] , a__ : Optional[int]=-1 , a__ : Optional[int]=None ) -> Optional[int]: '''simple docstring''' _A = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long _A = self.hash_filename_if_too_long(a__ , a__ ) # The path to the lock file. _A = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. _A = None # The default timeout value. _A = timeout # We use this lock primarily for the lock counter. _A = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. _A = 0 return None @property def a_ ( self : Any ) -> List[str]: '''simple docstring''' return self._lock_file @property def a_ ( self : Dict ) -> int: '''simple docstring''' return self._timeout @timeout.setter def a_ ( self : Optional[int] , a__ : List[Any] ) -> Optional[int]: '''simple docstring''' _A = float(a__ ) return None def a_ ( self : Tuple ) -> List[Any]: '''simple docstring''' raise NotImplementedError() def a_ ( self : Tuple ) -> Any: '''simple docstring''' raise NotImplementedError() @property def a_ ( self : Any ) -> Optional[int]: '''simple docstring''' return self._lock_file_fd is not None def a_ ( self : Any , a__ : Optional[int]=None , a__ : Optional[int]=0.0_5 ) -> str: '''simple docstring''' if timeout is None: _A = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 _A = id(self ) _A = self._lock_file _A = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a__ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: _A = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def a_ ( self : Dict , a__ : int=False ) -> Optional[Any]: '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: _A = id(self ) _A = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() _A = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : Union[str, Any] ) -> Dict: '''simple docstring''' self.acquire() return self def __exit__( self : Union[str, Any] , a__ : Dict , a__ : int , a__ : Dict ) -> str: '''simple docstring''' self.release() return None def __del__( self : List[Any] ) -> Dict: '''simple docstring''' self.release(force=a__ ) return None def a_ ( self : Any , a__ : str , a__ : int ) -> str: '''simple docstring''' _A = os.path.basename(a__ ) if len(a__ ) > max_length and max_length > 0: _A = os.path.dirname(a__ ) _A = str(hash(a__ ) ) _A = filename[: max_length - len(a__ ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(a__ , a__ ) else: return path class snake_case ( _UpperCamelCase): def __init__( self : Union[str, Any] , a__ : List[str] , a__ : Optional[int]=-1 , a__ : Optional[int]=None ) -> Union[str, Any]: '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a__ , timeout=a__ , max_filename_length=a__ ) _A = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def a_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _A = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: _A = os.open(self._lock_file , a__ ) except OSError: pass else: try: msvcrt.locking(a__ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a__ ) else: _A = fd return None def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = self._lock_file_fd _A = None msvcrt.locking(a__ , msvcrt.LK_UNLCK , 1 ) os.close(a__ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class snake_case ( _UpperCamelCase): def __init__( self : List[Any] , a__ : Optional[Any] , a__ : List[Any]=-1 , a__ : Optional[Any]=None ) -> Tuple: '''simple docstring''' _A = os.statvfs(os.path.dirname(a__ ) ).f_namemax super().__init__(a__ , timeout=a__ , max_filename_length=a__ ) def a_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _A = os.O_RDWR | os.O_CREAT | os.O_TRUNC _A = os.open(self._lock_file , a__ ) try: fcntl.flock(a__ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a__ ) else: _A = fd return None def a_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _A = self._lock_file_fd _A = None fcntl.flock(a__ , fcntl.LOCK_UN ) os.close(a__ ) return None class snake_case ( _UpperCamelCase): def a_ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: _A = os.open(self._lock_file , a__ ) except OSError: pass else: _A = fd return None def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' os.close(self._lock_file_fd ) _A = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
621
"""simple docstring""" from __future__ import annotations def a__ ( __lowercase , __lowercase ) -> float: _A = sorted(numsa + numsa ) _A , _A = divmod(len(__lowercase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() a_ = [float(x) for x in input("Enter the elements of first array: ").split()] a_ = [float(x) for x in input("Enter the elements of second array: ").split()] print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
621
1
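The median snippet in the row above merges and sorts both arrays, then takes the middle element (odd total length) or the mean of the two middle elements (even). A restored sketch:

def median_of_two_arrays(nums_a, nums_b):
    """Median of the values of two (not necessarily sorted) arrays combined."""
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2

For example, median_of_two_arrays([1, 3], [2]) returns 2 and median_of_two_arrays([1, 2], [3, 4]) returns 2.5.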
"""simple docstring""" import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib a_ = threading.Lock() a_ = None a_ = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } a_ = logging.WARNING a_ = True def a__ ( ) -> Any: _A = os.getenv("TRANSFORMERS_VERBOSITY" , __lowercase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def a__ ( ) -> str: return __name__.split("." )[0] def a__ ( ) -> logging.Logger: return logging.getLogger(_get_library_name() ) def a__ ( ) -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _A = logging.StreamHandler() # Set sys.stderr as stream. _A = sys.stderr.flush # Apply our default configuration to the library root logger. _A = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) _A = False def a__ ( ) -> None: global _default_handler with _lock: if not _default_handler: return _A = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) _A = None def a__ ( ) -> Any: return log_levels def a__ ( __lowercase = None ) -> logging.Logger: if name is None: _A = _get_library_name() _configure_library_root_logger() return logging.getLogger(__lowercase ) def a__ ( ) -> int: _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def a__ ( __lowercase ) -> None: _configure_library_root_logger() _get_library_root_logger().setLevel(__lowercase ) def a__ ( ) -> List[Any]: return set_verbosity(__lowercase ) def a__ ( ) -> Optional[Any]: return set_verbosity(__lowercase ) def a__ ( ) -> Dict: return set_verbosity(__lowercase ) def a__ ( ) -> Optional[int]: return set_verbosity(__lowercase ) def a__ ( ) -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def a__ ( ) -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def a__ ( __lowercase ) -> None: _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(__lowercase ) def a__ ( __lowercase ) -> None: _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(__lowercase ) def a__ ( ) -> None: _configure_library_root_logger() _A = False def a__ ( ) -> None: _configure_library_root_logger() _A = True def a__ ( ) -> None: _A = _get_library_root_logger().handlers for handler in handlers: _A = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(__lowercase ) def a__ ( ) -> None: _A = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(__lowercase ) def a__ ( self , *__lowercase , **__lowercase ) -> int: _A = 
os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , __lowercase ) if no_advisory_warnings: return self.warning(*__lowercase , **__lowercase ) a_ = warning_advice @functools.lru_cache(__lowercase ) def a__ ( self , *__lowercase , **__lowercase ) -> Union[str, Any]: self.warning(*__lowercase , **__lowercase ) a_ = warning_once class snake_case : def __init__( self : int , *a__ : Any , **a__ : int ) -> List[str]: # pylint: disable=unused-argument '''simple docstring''' _A = args[0] if args else None def __iter__( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' return iter(self._iterator ) def __getattr__( self : List[str] , a__ : Optional[Any] ) -> Dict: '''simple docstring''' def empty_fn(*a__ : int , **a__ : Union[str, Any] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Optional[Any] ) -> Dict: '''simple docstring''' return self def __exit__( self : Optional[int] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : int ) -> List[str]: '''simple docstring''' return class snake_case : def __call__( self : Dict , *a__ : str , **a__ : Dict ) -> int: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*a__ , **a__ ) else: return EmptyTqdm(*a__ , **a__ ) def a_ ( self : Union[str, Any] , *a__ : Any , **a__ : str ) -> Tuple: '''simple docstring''' _A = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*a__ , **a__ ) def a_ ( self : Optional[Any] ) -> Any: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() a_ = _tqdm_cls() def a__ ( ) -> bool: global _tqdm_active return bool(_tqdm_active ) def a__ ( ) -> Dict: global _tqdm_active _A = True hf_hub_utils.enable_progress_bars() def a__ ( ) -> List[Any]: global _tqdm_active _A = False hf_hub_utils.disable_progress_bars()
621
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", "Salesforce/blip-vqa-capfit-large": ( "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json" ), "Salesforce/blip-image-captioning-base": ( "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json" ), "Salesforce/blip-image-captioning-large": ( "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json" ), "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", "Salesforce/blip-itm-large-flikr": ( "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json" ), } class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip_text_model' def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , ) _A = vocab_size _A = hidden_size _A = encoder_hidden_size _A = intermediate_size _A = projection_dim _A = hidden_dropout_prob _A = num_hidden_layers _A = num_attention_heads _A = max_position_embeddings _A = layer_norm_eps _A = hidden_act _A = initializer_range _A = attention_probs_dropout_prob _A = is_decoder _A = use_cache @classmethod def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(a__ ) _A , _A = cls.get_config_dict(a__ , **a__ ) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _A = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a__ , **a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip_vision_model' def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]: '''simple docstring''' super().__init__(**a__ ) _A = hidden_size _A = intermediate_size _A = projection_dim _A = num_hidden_layers _A = num_attention_heads _A = patch_size _A = image_size _A = initializer_range _A = attention_dropout _A = layer_norm_eps _A = hidden_act @classmethod def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(a__ ) _A , _A = cls.get_config_dict(a__ , **a__ ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _A = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a__ , **a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip' __UpperCamelCase = True def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict: '''simple docstring''' super().__init__(**a__ ) if text_config is None: _A = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." ) if vision_config is None: _A = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." ) _A = BlipTextConfig(**a__ ) _A = BlipVisionConfig(**a__ ) _A = self.vision_config.hidden_size _A = projection_dim _A = logit_scale_init_value _A = 1.0 _A = 0.0_2 _A = image_text_hidden_size @classmethod def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ ) def a_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.text_config.to_dict() _A = self.vision_config.to_dict() _A = self.__class__.model_type return output
621
1
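The pattern in the config file above is worth isolating: a composite config holds one sub-config per modality, rebuilds each from a plain dict (falling back to defaults when given None), and propagates one dimension, the vision hidden size, into the text side as the encoder hidden size. A hedged, framework-free sketch with hypothetical class names:

class TextCfg:
    def __init__(self, vocab_size=30524, hidden_size=768, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size


class VisionCfg:
    def __init__(self, hidden_size=768, image_size=384, **kwargs):
        self.hidden_size = hidden_size
        self.image_size = image_size


class CompositeCfg:
    def __init__(self, text_config=None, vision_config=None):
        # None falls back to sub-config defaults, mirroring the snippet above
        self.text_config = TextCfg(**(text_config or {}))
        self.vision_config = VisionCfg(**(vision_config or {}))
        # the text encoder's cross-attention width follows the vision tower
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size


cfg = CompositeCfg(text_config={"vocab_size": 1000})
assert cfg.text_config.encoder_hidden_size == 768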
"""simple docstring""" import numpy as np def a__ ( __lowercase , __lowercase ) -> np.ndarray: return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
621
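For reference, ELU computes f(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, so it passes positive inputs through and saturates at -alpha for large negative ones. A quick numeric check of that behaviour:

import numpy as np

def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

# positive inputs pass through; large negative inputs approach -alpha
out = elu(np.array([-10.0, -1.0, 0.0, 2.0]), alpha=1.0)
# -> approximately [-0.99995, -0.63212, 0.0, 2.0]
assert abs(out[0] + 1.0) < 1e-3 and out[3] == 2.0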
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class snake_case ( unittest.TestCase , _UpperCamelCase): def a_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _A = load_tool("text-classification" ) self.tool.setup() _A = load_tool("text-classification" , remote=a__ ) def a_ ( self : Optional[int] ) -> Dict: '''simple docstring''' _A = self.tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Optional[Any] ) -> Dict: '''simple docstring''' _A = self.remote_tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' _A = self.tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Dict ) -> Any: '''simple docstring''' _A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(a__ , "positive" )
621
1
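Outside the test harness, the call pattern those four tests exercise looks roughly like this (a sketch assuming a transformers version that ships the agents/tools API):

from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()
# positional and keyword call styles are interchangeable, per the tests above
label = classifier("That's quite cool", ["positive", "negative"])
print(label)  # the assertions above expect "positive"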
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class snake_case ( tf.keras.optimizers.schedules.LearningRateSchedule): def __init__( self : Tuple , a__ : float , a__ : Callable , a__ : int , a__ : float = 1.0 , a__ : str = None , ) -> Optional[int]: '''simple docstring''' super().__init__() _A = initial_learning_rate _A = warmup_steps _A = power _A = decay_schedule_fn _A = name def __call__( self : Optional[Any] , a__ : int ) -> Dict: '''simple docstring''' with tf.name_scope(self.name or "WarmUp" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. _A = tf.cast(a__ , tf.floataa ) _A = tf.cast(self.warmup_steps , tf.floataa ) _A = global_step_float / warmup_steps_float _A = self.initial_learning_rate * tf.math.pow(a__ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=a__ , ) def a_ ( self : Tuple ) -> Any: '''simple docstring''' return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = 0.0 , __lowercase = 0.9 , __lowercase = 0.999 , __lowercase = 1E-8 , __lowercase = None , __lowercase = None , __lowercase = 0.0 , __lowercase = 1.0 , __lowercase = None , ) -> Union[str, Any]: _A = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowercase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowercase , ) if num_warmup_steps: _A = WarmUp( initial_learning_rate=__lowercase , decay_schedule_fn=__lowercase , warmup_steps=__lowercase , ) if weight_decay_rate > 0.0: _A = AdamWeightDecay( learning_rate=__lowercase , weight_decay_rate=__lowercase , beta_a=__lowercase , beta_a=__lowercase , epsilon=__lowercase , clipnorm=__lowercase , global_clipnorm=__lowercase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=__lowercase , ) else: _A = tf.keras.optimizers.Adam( learning_rate=__lowercase , beta_a=__lowercase , beta_a=__lowercase , epsilon=__lowercase , clipnorm=__lowercase , global_clipnorm=__lowercase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class snake_case ( _UpperCamelCase): def __init__( self : Optional[int] , a__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , a__ : float = 0.9 , a__ : float = 0.9_9_9 , a__ : float = 1E-7 , a__ : bool = False , a__ : float = 0.0 , a__ : Optional[List[str]] = None , a__ : Optional[List[str]] = None , a__ : str = "AdamWeightDecay" , **a__ : Union[str, Any] , ) -> str: '''simple docstring''' super().__init__(a__ , a__ , a__ , a__ , a__ , a__ , **a__ ) _A = weight_decay_rate _A = include_in_weight_decay _A = exclude_from_weight_decay @classmethod def a_ ( cls : int , a__ : Dict ) -> int: '''simple docstring''' _A = {"WarmUp": WarmUp} return super(a__ , cls ).from_config(a__ , custom_objects=a__ ) def a_ ( self : Any , a__ : Tuple , a__ : Dict , a__ : Any ) -> Optional[Any]: '''simple docstring''' super(a__ , self )._prepare_local(a__ , a__ , a__ ) _A = tf.constant( self.weight_decay_rate , name="adam_weight_decay_rate" ) def a_ ( self : int , a__ : Optional[Any] , a__ : List[str] , a__ : str ) -> str: '''simple docstring''' _A = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , ) return tf.no_op() def a_ ( self : Optional[Any] , a__ : Dict , a__ : Any=None , **a__ : List[str] ) -> Tuple: '''simple docstring''' _A , _A = list(zip(*a__ ) ) return super(a__ , self ).apply_gradients(zip(a__ , a__ ) , name=a__ , **a__ ) def a_ ( self : Optional[int] , a__ : Dict , a__ : Tuple , a__ : Union[str, Any] ) -> List[str]: '''simple docstring''' if apply_state is None: return self._decayed_lr_t[var_dtype], {} _A = apply_state or {} _A = apply_state.get((var_device, var_dtype) ) if coefficients is None: _A = self._fallback_apply_state(a__ , a__ ) _A = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def a_ ( self : List[str] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Optional[int]=None ) -> Tuple: '''simple docstring''' _A , _A = self._get_lr(var.device , var.dtype.base_dtype , a__ ) _A = self._decay_weights_op(a__ , a__ , a__ ) with tf.control_dependencies([decay] ): return super(a__ , self )._resource_apply_dense(a__ , a__ , **a__ ) def a_ ( self : List[Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Dict , a__ : List[Any]=None ) -> Optional[int]: '''simple docstring''' _A , _A = self._get_lr(var.device , var.dtype.base_dtype , a__ ) _A = self._decay_weights_op(a__ , a__ , a__ ) with tf.control_dependencies([decay] ): return super(a__ , self )._resource_apply_sparse(a__ , a__ , a__ , **a__ ) def a_ ( self : Optional[int] ) -> int: '''simple docstring''' _A = super().get_config() config.update({"weight_decay_rate": self.weight_decay_rate} ) return config def a_ ( self : Any , a__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(a__ , a__ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(a__ , a__ ) is not None: return False return True class snake_case ( _UpperCamelCase): def __init__( self : Any ) -> List[Any]: '''simple docstring''' _A = [] _A = None @property def a_ ( self : str ) -> List[Any]: '''simple docstring''' if self._accum_steps is None: _A = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=a__ , 
synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def a_ ( self : List[Any] ) -> int: '''simple docstring''' if not self._gradients: raise ValueError("The accumulator should be called first to initialize the gradients" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Tuple , a__ : Optional[Any] ) -> int: '''simple docstring''' if not self._gradients: _A = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(a__ ) , trainable=a__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(a__ ) != len(self._gradients ): raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(a__ )}""" ) for accum_gradient, gradient in zip(self._gradients , a__ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(a__ ) self._accum_steps.assign_add(1 ) def a_ ( self : str ) -> int: '''simple docstring''' if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(a__ ) )
621
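The WarmUp wrapper above reduces to simple arithmetic: while step < warmup_steps the learning rate is initial_learning_rate * (step / warmup_steps) ** power, after which the wrapped decay schedule takes over. A TensorFlow-free check of the ramp:

def warmup_lr(step, init_lr=1e-3, warmup_steps=100, power=1.0):
    # linear (power=1) ramp during warmup, mirroring the tf.cond branch above
    if step < warmup_steps:
        return init_lr * (step / warmup_steps) ** power
    return init_lr  # the real class delegates to decay_schedule_fn here

assert warmup_lr(50) == 5e-4   # halfway through warmup -> half the peak LR
assert warmup_lr(100) == 1e-3  # warmup finished -> peak LR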
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = StableDiffusionInpaintPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __UpperCamelCase = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCamelCase = frozenset([]) def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , ) _A = PNDMScheduler(skip_prk_steps=a__ ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _A = CLIPTextModel(a__ ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int: '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ ) _A = image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) ) _A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) ) if str(a__ ).startswith("mps" ): _A = torch.manual_seed(a__ ) else: _A = torch.Generator(device=a__ ).manual_seed(a__ ) _A = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def a_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = StableDiffusionInpaintPipeline(**a__ ) _A = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _A = self.get_dummy_inputs(a__ ) _A = sd_pipe(**a__ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array([0.4_7_2_7, 
0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a_ ( self : str ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : List[Any] ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = StableDiffusionInpaintPipeline.from_pretrained( a__ , torch_dtype=torch.floataa , safety_checker=a__ , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" ) _A = StableDiffusionInpaintPipeline.from_pretrained( a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ) _A = torch.cuda.max_memory_allocated() 
# make sure that less than 2.65 GB is allocated assert mem_bytes < 2.6_5 * 10**9
621
1
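For orientation, the happy path those integration tests walk is the standard diffusers inpainting flow. The checkpoint name and asset URLs are taken from the tests themselves; the rest is ordinary API usage (a GPU sketch, not run here):

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]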
"""simple docstring""" import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def a__ ( __lowercase ) -> Union[str, Any]: return EnvironmentCommand() class snake_case ( _UpperCamelCase): @staticmethod def a_ ( a__ : ArgumentParser ) -> Union[str, Any]: '''simple docstring''' _A = parser.add_parser("env" ) download_parser.set_defaults(func=a__ ) def a_ ( self : List[Any] ) -> Any: '''simple docstring''' _A = huggingface_hub.__version__ _A = "not installed" _A = "NA" if is_torch_available(): import torch _A = torch.__version__ _A = torch.cuda.is_available() _A = "not installed" if is_transformers_available(): import transformers _A = transformers.__version__ _A = "not installed" if is_accelerate_available(): import accelerate _A = accelerate.__version__ _A = "not installed" if is_xformers_available(): import xformers _A = xformers.__version__ _A = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""", "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(a__ ) ) return info @staticmethod def a_ ( a__ : Optional[Any] ) -> List[str]: '''simple docstring''' return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
621
"""simple docstring""" def a__ ( __lowercase , __lowercase ) -> int: while a != 0: _A , _A = b % a, a return b def a__ ( __lowercase , __lowercase ) -> int: if gcd(__lowercase , __lowercase ) != 1: _A = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(__lowercase ) _A , _A , _A = 1, 0, a _A , _A , _A = 0, 1, m while va != 0: _A = ua // va _A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
621
1
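A worked pass through the extended-Euclid loop above, for a = 3 and m = 11: the iteration terminates with u1 = 4, and indeed 3 * 4 = 12 ≡ 1 (mod 11). The same steps inline:

a, m = 3, 11
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
    q = u3 // v3
    v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
inverse = u1 % m
assert inverse == 4 and (a * inverse) % m == 1  # 3 * 4 = 12 ≡ 1 (mod 11)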
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available a_ = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["ASTFeatureExtractor"] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
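The `_import_structure` / `_LazyModule` arrangement above exists to defer heavy imports until an attribute is first touched. A stripped-down sketch of that mechanism (hypothetical names, standard library only):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(module_name)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is only imported here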
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class snake_case ( _UpperCamelCase): def __init__( self : List[Any] , a__ : Any ) -> Any: '''simple docstring''' _A = data def __iter__( self : List[str] ) -> str: '''simple docstring''' for element in self.data: yield element def a__ ( __lowercase=True ) -> Tuple: _A = Accelerator(even_batches=__lowercase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]: if iterable: _A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) ) else: _A = TensorDataset(torch.as_tensor(range(__lowercase ) ) ) _A = DataLoader(__lowercase , batch_size=__lowercase ) _A = accelerator.prepare(__lowercase ) return dl def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict: _A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase ) _A = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def a__ ( ) -> List[str]: _A = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def a__ ( ) -> List[Any]: _A = create_accelerator(even_batches=__lowercase ) verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def a__ ( ) -> int: _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__lowercase ): _A = ddp_model(batch[0].float() ) _A = output.sum() loss.backward() batch_idxs.append(__lowercase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def a__ ( 
__lowercase ) -> List[str]: with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for multi-GPU" in str(w[-1].message ) def a__ ( ) -> Tuple: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = train_dl.batch_sampler.even_batches _A = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> int: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> Optional[Any]: _A = create_accelerator() _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for map-style datasets" in str(w[-1].message ) def a__ ( ) -> Optional[Any]: _A = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) _A = accelerator.state.distributed_type _A = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__lowercase ) _A = original_state if __name__ == "__main__": main()
621
1
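Condensed, the API those checks exercise composes like this (a sketch assuming two or more processes launched via accelerate; `join_uneven_inputs` guards gradient synchronization when ranks exhaust their data at different times):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)
model = accelerator.prepare(torch.nn.Linear(1, 1))
dl = accelerator.prepare(
    DataLoader(TensorDataset(torch.arange(3.0).unsqueeze(1)), batch_size=1)
)

# without even-batch padding, ranks may see different batch counts;
# join_uneven_inputs keeps the backward sync from deadlocking in that case
with accelerator.join_uneven_inputs([model]):
    for (batch,) in dl:
        loss = model(batch).sum()
        accelerator.backward(loss)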
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer a_ = logging.get_logger(__name__) a_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all MVP models at https://huggingface.co/models?filter=mvp a_ = { "vocab_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json", }, "added_tokens.json": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json", }, "merges_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt", }, "tokenizer_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json", }, } a_ = { "RUCAIBox/mvp": 10_24, } class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] __UpperCamelCase = MvpTokenizer def __init__( self : List[Any] , a__ : Any=None , a__ : Optional[int]=None , a__ : int=None , a__ : Dict="replace" , a__ : List[Any]="<s>" , a__ : Dict="</s>" , a__ : int="</s>" , a__ : int="<s>" , a__ : Optional[int]="<unk>" , a__ : Tuple="<pad>" , a__ : List[Any]="<mask>" , a__ : Optional[Any]=False , a__ : Union[str, Any]=True , **a__ : Union[str, Any] , ) -> Optional[int]: '''simple docstring''' super().__init__( a__ , a__ , tokenizer_file=a__ , errors=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , trim_offsets=a__ , **a__ , ) _A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space: _A = getattr(a__ , pre_tok_state.pop("type" ) ) _A = add_prefix_space _A = pre_tok_class(**a__ ) _A = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _A = "post_processor" _A = getattr(self.backend_tokenizer , a__ , a__ ) if tokenizer_component_instance: _A = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _A = tuple(state["sep"] ) if "cls" in state: _A = tuple(state["cls"] ) _A = False if state.get("add_prefix_space" , a__ ) != add_prefix_space: _A = add_prefix_space _A = True if state.get("trim_offsets" , a__ ) != trim_offsets: _A = trim_offsets _A = True if changes_to_apply: _A = getattr(a__ , state.pop("type" ) ) _A = component_class(**a__ ) setattr(self.backend_tokenizer , a__ , a__ ) @property def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def a_ ( self : List[Any] , a__ : List[str] ) -> Optional[Any]: '''simple docstring''' _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else value _A = value def a_ ( self : Optional[int] , *a__ : Any , **a__ : Dict ) -> BatchEncoding: '''simple docstring''' _A = kwargs.get("is_split_into_words" , a__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a__ , **a__ ) def a_ ( self : Optional[Any] , *a__ : Tuple , **a__ : List[Any] ) -> BatchEncoding: '''simple docstring''' _A = kwargs.get("is_split_into_words" , a__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*a__ , **a__ ) def a_ ( self : Tuple , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' _A = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def a_ ( self : int , a__ : List[Any] , a__ : int=None ) -> Any: '''simple docstring''' _A = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self : Any , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
621
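The two methods at the end of the tokenizer above implement the BART-style template: `<s> A </s>` for single sequences and `<s> A </s></s> B </s>` for pairs, with all-zero token type IDs. The same logic in plain Python:

def build_inputs(bos, eos, ids_a, ids_b=None):
    # mirrors the special-token insertion in the class above
    out = [bos] + ids_a + [eos]
    if ids_b is not None:
        out += [eos] + ids_b + [eos]
    return out

assert build_inputs(0, 2, [10, 11]) == [0, 10, 11, 2]
assert build_inputs(0, 2, [10], [20]) == [0, 10, 2, 2, 20, 2]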
"""simple docstring""" class snake_case : def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]: '''simple docstring''' _A = None _A = None _A = graph self._normalize_graph(a__ , a__ ) _A = len(a__ ) _A = None def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict: '''simple docstring''' if sources is int: _A = [sources] if sinks is int: _A = [sinks] if len(a__ ) == 0 or len(a__ ) == 0: return _A = sources[0] _A = sinks[0] # make fake vertex if there are more # than one source or sink if len(a__ ) > 1 or len(a__ ) > 1: _A = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _A = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _A = max_input_flow _A = 0 _A = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _A = max_input_flow _A = size - 1 def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str: '''simple docstring''' _A = algorithm(self ) class snake_case : def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]: '''simple docstring''' _A = flow_network _A = flow_network.verticesCount _A = flow_network.sourceIndex _A = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _A = flow_network.graph _A = False def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' if not self.executed: self._algorithm() _A = True def a_ ( self : Any ) -> int: '''simple docstring''' pass class snake_case ( _UpperCamelCase): def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]: '''simple docstring''' super().__init__(a__ ) # use this to save your result _A = -1 def a_ ( self : Any ) -> List[str]: '''simple docstring''' if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class snake_case ( _UpperCamelCase): def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict: '''simple docstring''' super().__init__(a__ ) _A = [[0] * self.verticies_count for i in range(self.verticies_count )] _A = [0] * self.verticies_count _A = [0] * self.verticies_count def a_ ( self : Any ) -> Dict: '''simple docstring''' _A = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _A = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _A = 0 while i < len(a__ ): _A = vertices_list[i] _A = self.heights[vertex_index] self.process_vertex(a__ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(a__ ) ) _A = 0 else: i += 1 _A = sum(self.preflow[self.source_index] ) def a_ ( self : Dict , a__ : Any ) -> Optional[int]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(a__ , a__ ) self.relabel(a__ ) def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]: '''simple docstring''' _A = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def a_ ( self : Any , a__ : Dict ) -> Any: '''simple docstring''' _A = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _A = self.heights[to_index] if min_height is not None: _A = min_height + 1 if __name__ == "__main__": a_ = [0] a_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network a_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate a_ = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
621
1
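As a sanity check on the driver block above: the 4-node example has a single route from entrance 0 to exit 3, namely 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck edge 1 -> 2 caps the maximum flow at 6:

# bottleneck check for the example network above
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
path = [0, 1, 2, 3]  # the only augmenting path from entrance 0 to exit 3
bottleneck = min(graph[u][v] for u, v in zip(path, path[1:]))
assert bottleneck == 6  # the value the push-relabel solver should print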
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'upernet' def __init__( self : Tuple , a__ : Optional[int]=None , a__ : Dict=5_12 , a__ : List[Any]=0.0_2 , a__ : Optional[int]=[1, 2, 3, 6] , a__ : str=True , a__ : Optional[int]=0.4 , a__ : Tuple=3_84 , a__ : Tuple=2_56 , a__ : List[Any]=1 , a__ : Tuple=False , a__ : str=2_55 , **a__ : str , ) -> str: '''simple docstring''' super().__init__(**a__ ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _A = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(a__ , a__ ): _A = backbone_config.get("model_type" ) _A = CONFIG_MAPPING[backbone_model_type] _A = config_class.from_dict(a__ ) _A = backbone_config _A = hidden_size _A = initializer_range _A = pool_scales _A = use_auxiliary_head _A = auxiliary_loss_weight _A = auxiliary_in_channels _A = auxiliary_channels _A = auxiliary_num_convs _A = auxiliary_concat_input _A = loss_ignore_index def a_ ( self : Any ) -> List[str]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.backbone_config.to_dict() _A = self.__class__.model_type return output
621
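The backbone handling above follows a common dispatch idiom: None means build a default backbone config, a dict is resolved through a model-type registry, and anything else is assumed to be a ready config object. A generic sketch with hypothetical names:

CONFIG_MAPPING = {}  # model_type -> config class, filled below


class ResNetLikeConfig:
    model_type = "resnet"

    def __init__(self, out_features=None):
        self.out_features = out_features

    @classmethod
    def from_dict(cls, d):
        d = dict(d)
        d.pop("model_type", None)
        return cls(**d)


CONFIG_MAPPING["resnet"] = ResNetLikeConfig


def resolve_backbone(backbone_config):
    if backbone_config is None:
        return CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
    if isinstance(backbone_config, dict):
        cls = CONFIG_MAPPING[backbone_config["model_type"]]
        return cls.from_dict(backbone_config)
    return backbone_config  # already a config instance


cfg = resolve_backbone({"model_type": "resnet", "out_features": ["stage4"]})
assert cfg.out_features == ["stage4"]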
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, 
globals()["__file__"], _import_structure, module_spec=__spec__)
621
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ = { "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "GraphormerForGraphClassification", "GraphormerModel", "GraphormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None: '''simple docstring''' warnings.warn( "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use GLPNImageProcessor instead." , a__ , ) super().__init__(*a__ , **a__ )
621
1
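That shim is the usual rename-with-warning pattern: the deprecated class subclasses its replacement and emits a FutureWarning on construction, so old imports keep working. The same idea generically (hypothetical names):

import warnings


class NewProcessor:
    def __init__(self, size=384):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor(size=256)
assert caught and issubclass(caught[0].category, FutureWarning)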
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a_ = ["gpt2"] a_ = "gpt2" if is_tf_available(): class snake_case ( tf.Module): def __init__( self : Optional[Any] , a__ : str ) -> str: '''simple docstring''' super().__init__() _A = tokenizer _A = AutoConfig.from_pretrained(a__ ) _A = TFGPTaLMHeadModel.from_config(a__ ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def a_ ( self : List[str] , a__ : Dict ) -> Union[str, Any]: '''simple docstring''' _A = self.tokenizer(a__ ) _A = tokenized["input_ids"].to_tensor() _A = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) _A = self.model(input_ids=a__ , attention_mask=a__ )["logits"] return outputs @require_tf @require_keras_nlp class snake_case ( unittest.TestCase): def a_ ( self : List[Any] ) -> str: '''simple docstring''' super().setUp() _A = [GPTaTokenizer.from_pretrained(a__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)] _A = [TFGPTaTokenizer.from_pretrained(a__ ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _A = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] _A = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def a_ ( self : Optional[int] ) -> Tuple: '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: _A = tokenizer([test_inputs] , return_tensors="tf" ) _A = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors _A = python_outputs[key].numpy() _A = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(a__ , tf.intaa ) == tf_outputs_values ) ) @slow def a_ ( self : Any ) -> List[Any]: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _A = tf.function(a__ ) for test_inputs in self.test_sentences: _A = tf.constant(a__ ) _A = compiled_tokenizer(a__ ) _A = tf_tokenizer(a__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def a_ ( self : List[Any] ) -> str: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _A = ModelToSave(tokenizer=a__ ) _A = tf.convert_to_tensor([self.test_sentences[0]] ) _A = model.serving(a__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _A = Path(a__ ) / "saved.model" tf.saved_model.save(a__ , a__ , signatures={"serving_default": model.serving} ) _A = tf.saved_model.load(a__ ) _A = loaded_model.signatures["serving_default"](a__ )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out 
== loaded_output ) ) @slow def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _A = tf.convert_to_tensor([self.test_sentences[0]] ) _A = tf_tokenizer(a__ ) # Build model with some sample inputs _A = tf_tokenizer.get_config() _A = TFGPTaTokenizer.from_config(a__ ) _A = model_from_config(a__ ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def a_ ( self : Dict ) -> Dict: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run _A = 12_31_23 for max_length in [3, 5, 10_24]: _A = tf.convert_to_tensor([self.test_sentences[0]] ) _A = tf_tokenizer(a__ , max_length=a__ ) _A = out["input_ids"].numpy().shape[1] assert out_length == max_length
621
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def a__ ( __lowercase ) -> Optional[int]: _A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def a__ ( __lowercase ) -> List[Any]: _A , _A = emb.weight.shape _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _A = emb.weight.data return lin_layer def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]: _A = torch.load(__lowercase , map_location="cpu" )["model"] remove_ignore_keys_(__lowercase ) _A = state_dict["encoder.embed_tokens.weight"].shape[0] _A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: _A = "relu" _A = state_dict["decoder.embed_tokens.weight"] _A = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: _A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") a_ = parser.parse_args() a_ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
621
1
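The embedding-to-linear helper in the script above ties the LM head to the embedding table: a bias-free linear layer whose weight is the embedding matrix, so each logit is a dot product with a token embedding. A minimal illustration of the tying:

import torch
from torch import nn

emb = nn.Embedding(100, 16)  # vocab_size x hidden
vocab_size, hidden = emb.weight.shape
lm_head = nn.Linear(hidden, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # shared storage: tied weights

h = torch.randn(1, hidden)
logits = lm_head(h)  # shape (1, vocab_size)
assert torch.allclose(logits[0, 7], h[0] @ emb.weight[7])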
"""simple docstring""" def a__ ( __lowercase ) -> list: if len(__lowercase ) <= 1: return lst _A = 1 while i < len(__lowercase ): if lst[i - 1] <= lst[i]: i += 1 else: _A , _A = lst[i], lst[i - 1] i -= 1 if i == 0: _A = 1 return lst if __name__ == "__main__": a_ = input("Enter numbers separated by a comma:\n").strip() a_ = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
621
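Gnome sort walks forward while adjacent elements are ordered and steps back one position after every swap, like insertion sort with explicit backtracking. A short trace on [3, 1, 2]:

# trace of the gnome sort above; max(i - 1, 1) folds its "step back,
# but never below index 1" rule into one expression
steps = []
lst, i = [3, 1, 2], 1
while i < len(lst):
    if lst[i - 1] <= lst[i]:
        i += 1
    else:
        lst[i - 1], lst[i] = lst[i], lst[i - 1]
        i = max(i - 1, 1)
        steps.append(list(lst))
assert steps == [[1, 3, 2], [1, 2, 3]] and lst == [1, 2, 3]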
"""simple docstring""" import numpy as np def a__ ( __lowercase , __lowercase ) -> np.ndarray: return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
621
1
"""simple docstring""" import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class snake_case : def __init__( self : Optional[Any] , a__ : int , a__ : int = 13 , a__ : int = 64 , a__ : int = 2 , a__ : int = 3 , a__ : int = 3 , a__ : bool = True , a__ : bool = True , a__ : int = 1_28 , a__ : Dict=[16, 32, 64, 1_28] , a__ : int = 7 , a__ : int = 4 , a__ : int = 37 , a__ : str = "gelu" , a__ : float = 0.1 , a__ : float = 0.1 , a__ : int = 10 , a__ : float = 0.0_2 , a__ : int = 2 , a__ : int = 1 , a__ : int = 1_28 , a__ : List[int] = [2, 2, 2, 2] , a__ : int = 2 , a__ : int = 2 , ) -> Tuple: '''simple docstring''' _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = encoder_stride _A = num_attention_outputs _A = embed_dim _A = embed_dim + 1 _A = resolution _A = depths _A = hidden_sizes _A = dim _A = mlp_expansion_ratio def a_ ( self : Any ) -> Dict: '''simple docstring''' _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def a_ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def a_ ( self : Union[str, Any] , a__ : Optional[Any] , a__ : Any , a__ : Tuple ) -> Tuple: '''simple docstring''' _A = TFEfficientFormerModel(config=a__ ) _A = model(a__ , training=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self : Tuple , a__ : int , a__ : Dict , a__ : List[str] ) -> List[str]: '''simple docstring''' _A = self.type_sequence_label_size _A = TFEfficientFormerForImageClassification(a__ ) _A = model(a__ , labels=a__ , training=a__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _A = 1 _A = TFEfficientFormerForImageClassification(a__ ) _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a_ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __UpperCamelCase = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def a_ ( self : Any ) -> Optional[Any]: '''simple docstring''' _A = TFEfficientFormerModelTester(self ) _A = ConfigTester( self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a_ ( self : int ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def a_ ( self : Dict ) -> Any: '''simple docstring''' pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def a_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' pass def a_ ( self : int ) -> Dict: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(a__ ) _A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , a__ ) def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' def check_hidden_states_output(a__ : Union[str, Any] , a__ : Optional[int] , a__ : int ): _A = model_class(a__ ) _A = model(**self._prepare_for_class(a__ , a__ ) , training=a__ ) _A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _A = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a__ ) , a__ ) if hasattr(self.model_tester , "encoder_seq_length" ): _A = self.model_tester.encoder_seq_length if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1: _A = seq_length * self.model_tester.chunk_length else: _A = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _A = outputs.decoder_hidden_states self.asseretIsInstance(a__ , (list, tuple) ) self.assertEqual(len(a__ ) , a__ ) _A = getattr(self.model_tester , "seq_length" , a__ ) _A = getattr(self.model_tester , "decoder_seq_length" , a__ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = True 
check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(a__ , a__ , a__ ) def a_ ( self : Union[str, Any] , a__ : List[Any] , a__ : Any , a__ : Tuple=False ) -> int: '''simple docstring''' _A = super()._prepare_for_class(a__ , a__ , return_labels=a__ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def a_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def a_ ( self : Tuple ) -> str: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a__ ) def a_ ( self : List[Any] ) -> int: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def a_ ( self : Dict ) -> List[str]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFEfficientFormerModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def a_ ( self : int ) -> str: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = True _A = getattr(self.model_tester , "seq_length" , a__ ) _A = getattr(self.model_tester , "encoder_seq_length" , a__ ) _A = getattr(self.model_tester , "key_length" , a__ ) _A = getattr(self.model_tester , "chunk_length" , a__ ) if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ): _A = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _A = True _A = False _A = True _A = model_class(a__ ) _A = model(**self._prepare_for_class(a__ , a__ ) , training=a__ ) _A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a__ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _A = True _A = model_class(a__ ) _A = model(**self._prepare_for_class(a__ , a__ ) , training=a__ ) _A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a__ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def a_ ( self : Dict ) -> int: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _A = model_class(a__ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _A = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a__ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _A = model(a__ ) self.assertTrue(outputs_dict is not None ) def a__ ( ) -> Optional[Any]: _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class 
snake_case ( unittest.TestCase): @cached_property def a_ ( self : Optional[Any] ) -> Any: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def a_ ( self : Optional[Any] ) -> int: '''simple docstring''' _A = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=a__ , return_tensors="tf" ) # forward pass _A = model(**a__ , training=a__ ) # verify the logits _A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , a__ ) _A = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) ) @slow def a_ ( self : str ) -> int: '''simple docstring''' _A = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=a__ , return_tensors="tf" ) # forward pass _A = model(**a__ , training=a__ ) # verify the logits _A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , a__ ) _A = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
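# Hedged usage sketch (an addition, not part of the test file above): minimal
# inference with the TF EfficientFormer classes the integration tests
# exercise. Assumes TensorFlow, Pillow, and access to the
# "snap-research/efficientformer-l1-300" checkpoint named in the tests; the
# image path is the COCO fixture that prepare_img() loads.
import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs, training=False)
# logits has shape (1, 1000), matching the TensorShape assertion in the slow tests
predicted_idx = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_idx])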
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging a_ = logging.get_logger(__name__) a_ = {"vocab_file": "spiece.model"} a_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 a_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } a_ = "▁" class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None: '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: _A = [F"""<extra_id_{i}>""" for i in range(a__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" ) _A = legacy _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , ) _A = vocab_file _A = extra_ids _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a__ ) @staticmethod def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple: '''simple docstring''' if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , ) return max_model_length @property def a_ ( self : List[Any] ) -> Dict: '''simple docstring''' return self.sp_model.get_piece_size() + self._extra_ids def a_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(a__ )) + [1] return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1] def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' return list( set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) ) def a_ ( self : str ) -> List[Any]: '''simple docstring''' return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()] def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]: '''simple docstring''' if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated""" " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = self._add_eos_if_not_present(a__ ) if token_ids_a is None: return token_ids_a else: _A = self._add_eos_if_not_present(a__ ) return token_ids_a + token_ids_a def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _A = self.__dict__.copy() _A = None return state def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _A = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]: '''simple docstring''' if not self.legacy: _A = SPIECE_UNDERLINE + text.replace(a__ , " " ) return super().tokenize(a__ , **a__ ) def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any: '''simple docstring''' if not self.legacy: _A = text.startswith(a__ ) if is_first: _A = text[1:] _A = self.sp_model.encode(a__ , out_type=a__ ) if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ): _A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def a_ ( self : int , a__ : List[Any] ) -> List[str]: '''simple docstring''' if token.startswith("<extra_id_" ): _A = re.match(r"<extra_id_(\d+)>" , a__ ) _A = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(a__ ) def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any: '''simple docstring''' if index < self.sp_model.get_piece_size(): _A = self.sp_model.IdToPiece(a__ ) else: _A = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]: '''simple docstring''' _A = [] _A = "" _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a__ ) + token _A = True _A = [] else: current_sub_tokens.append(a__ ) _A = False out_string += self.sp_model.decode(a__ ) return out_string.strip() def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(a__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a__ ) elif not os.path.isfile(self.vocab_file ): with open(a__ , "wb" ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(a__ ) return (out_vocab_file,)
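# Hedged usage sketch (an addition, not part of the tokenizer file above): how
# the extra_ids / sentinel-token machinery implemented above is typically used.
# Assumes the released T5Tokenizer from transformers and the public "t5-small"
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
# The 100 default extra_ids become sentinels <extra_id_0> ... <extra_id_99>,
# mapped to the top of the vocabulary by _convert_token_to_id above.
ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(ids)  # ends with tokenizer.eos_token_id, appended by _add_eos_if_not_present
print(tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1)  # True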
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def a__ ( __lowercase ) -> List[Any]: _A = os.path.join(args.tf_model_dir , "parameters.json" ) _A = json.loads(open(__lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith(".pt" ): _A = args.output + ".pt" _A = OrderedDict() with tf.device("/CPU:0" ): _A = tf.train.load_checkpoint(args.tf_model_dir ) _A = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _A = reader.get_tensor(__lowercase ).astype(np.floataa ) if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ): continue if key_name.startswith("pasts/" ): if key_name.startswith("pasts/mlp" ): _A = int(key_name[9] ) elif key_name.startswith("pasts/out" ): _A = 8 _A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.startswith("model/moe" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/switch_gating/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/softmlp/kernel" ): _A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ): _A = key_name[-9:-7] for i in range(16 ): _A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) _A = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _A = torch.tensor(__lowercase ) elif key_name.startswith("model/mlp" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/p1/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/p1/bias" ): _A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/p2/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/p2/bias" ): _A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.startswith("model/ln" ): _A = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _A = "model.blocks.%d.feed_forward.norm.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/g" ): _A = "model.blocks.%d.feed_forward.norm.weight" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.startswith("model/att" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/qkv/kernel" ): _A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _A = state[:, 0, :, :] _A = state[:, 1, :, :] _A = state[:, 2, :, :] _A = ( 
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player _A = torch.tensor(__lowercase ) _A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player _A = torch.tensor(__lowercase ) _A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player _A = torch.tensor(__lowercase ) elif key_name.endswith("/o/kernel" ): _A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player _A = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.startswith("model/an" ): _A = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _A = "model.blocks.%d.self_attn.norm.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/g" ): _A = "model.blocks.%d.self_attn.norm.weight" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif ( key_name.startswith("model/wte" ) or key_name.startswith("model/wpe" ) or key_name.startswith("model/ete" ) ): _A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] _A = "model.%s.weight" % nlayer _A = vnp.copy() # same in embedded _A = torch.tensor(__lowercase ) if key_name.startswith("model/wte" ): _A = "lm_head.weight" _A = vnp.copy() # same in embedded _A = torch.tensor(__lowercase ) elif key_name.startswith("model/wob" ): _A = "final_logits_bias" _A = vnp.copy() # same in embedded _A = state.reshape((1, -1) ) _A = torch.tensor(__lowercase ) elif key_name == "model/dense/kernel": _A = "model.last_project.weight" _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name == "model/dense_1/bias": _A = "model.last_project.bias" _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) torch.save(__lowercase , args.output ) if __name__ == "__main__": a_ = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") a_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
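# Hedged usage note (an addition): the converter above is a command-line
# script; under the argparse flags it defines, an invocation would look like
# the following (paths and the script filename are placeholders):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output /path/to/model.pt
#
# --tf_model_dir must contain parameters.json next to the TensorFlow
# checkpoint files; if --output does not end in ".pt", the script appends it.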
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def a__ ( ) -> tuple[list[int], int]: _A = [randint(-1000 , 1000 ) for i in range(10 )] _A = randint(-5000 , 5000 ) return (arr, r) a_ = make_dataset() def a__ ( __lowercase , __lowercase ) -> tuple[int, ...]: for triplet in permutations(__lowercase , 3 ): if sum(__lowercase ) == target: return tuple(sorted(__lowercase ) ) return (0, 0, 0) def a__ ( __lowercase , __lowercase ) -> tuple[int, int, int]: arr.sort() _A = len(__lowercase ) for i in range(n - 1 ): _A , _A = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def a__ ( ) -> tuple[float, float]: _A = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n" _A = "\ntriplet_sum1(*dataset)\n" _A = "\ntriplet_sum2(*dataset)\n" _A = repeat(setup=__lowercase , stmt=__lowercase , repeat=5 , number=1_0000 ) _A = repeat(setup=__lowercase , stmt=__lowercase , repeat=5 , number=1_0000 ) return (min(__lowercase ), min(__lowercase )) if __name__ == "__main__": from doctest import testmod testmod() a_ = solution_times() print(f'''The time for naive implementation is {times[0]}.''') print(f'''The time for optimized implementation is {times[1]}.''')
"""simple docstring""" import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a_ = argparse.ArgumentParser( description=( "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") a_ = parser.parse_args() if args.model_type == "roberta": a_ = RobertaForMaskedLM.from_pretrained(args.model_name) a_ = "roberta" elif args.model_type == "gpt2": a_ = GPTaLMHeadModel.from_pretrained(args.model_name) a_ = "transformer" a_ = model.state_dict() a_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a_ = f'''{prefix}.embeddings.{w}.weight''' a_ = state_dict[param_name] for w in ["weight", "bias"]: a_ = f'''{prefix}.embeddings.LayerNorm.{w}''' a_ = state_dict[param_name] # Transformer Blocks # a_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: a_ = state_dict[f'''lm_head.dense.{w}'''] a_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a_ = state_dict[f'''{prefix}.ln_f.{w}'''] a_ = state_dict["lm_head.weight"] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class snake_case ( _UpperCamelCase): def __init__( self : Optional[int] , a__ : Optional[int] , a__ : List[Any] ) -> int: '''simple docstring''' _A = params _A = np.array(a__ ) _A = np.array([len(a__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Tuple , a__ : Optional[Any] ) -> Tuple: '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Dict ) -> Optional[int]: '''simple docstring''' return len(self.lengths ) def a_ ( self : List[Any] ) -> Tuple: '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def a_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' _A = self.params.max_model_input_size _A = self.lengths > max_len logger.info(F"""Splitting {sum(a__ )} too long sequences.""" ) def divide_chunks(a__ : Tuple , a__ : str ): return [l[i : i + n] for i in range(0 , len(a__ ) , a__ )] _A = [] _A = [] if self.params.mlm: _A , _A = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: _A , _A = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _A = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _A = np.insert(a__ , 0 , a__ ) if sub_s[-1] != sep_id: _A = np.insert(a__ , len(a__ ) , a__ ) assert len(a__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(a__ ) new_tok_ids.extend(a__ ) new_lengths.extend([len(a__ ) for l in sub_seqs] ) _A = np.array(a__ ) _A = np.array(a__ ) def a_ ( self : Optional[Any] ) -> str: '''simple docstring''' _A = len(self ) _A = self.lengths > 11 _A = self.token_ids[indices] _A = self.lengths[indices] _A = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def a_ ( self : int ) -> Any: '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: _A = self.params.special_tok_ids["unk_token"] _A = len(self ) _A = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _A = (unk_occs / self.lengths) < 0.5 _A = self.token_ids[indices] _A = self.lengths[indices] _A = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def a_ ( self : str ) -> Union[str, Any]: '''simple docstring''' if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def a_ ( self : Any , a__ : int ) -> Tuple: '''simple docstring''' _A = [t[0] for t in batch] _A = [t[1] for t in batch] assert len(a__ ) == len(a__ ) # Max for paddings _A = max(a__ ) # Pad token ids if self.params.mlm: _A = 
self.params.special_tok_ids["pad_token"] else: _A = self.params.special_tok_ids["unk_token"] _A = [list(t.astype(a__ ) ) + [pad_idx] * (max_seq_len_ - len(a__ )) for t in token_ids] assert len(tk_ ) == len(a__ ) assert all(len(a__ ) == max_seq_len_ for t in tk_ ) _A = torch.tensor(tk_ ) # (bs, max_seq_len_) _A = torch.tensor(a__ ) # (bs) return tk_t, lg_t
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem a_ = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 a_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def a__ ( __lowercase ) -> str: if "://" in dataset_path: _A = dataset_path.split("://" )[1] return dataset_path def a__ ( __lowercase ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = not is_remote_filesystem(__lowercase ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__lowercase ) , fs._strip_protocol(__lowercase ) ) else: fs.mv(__lowercase , __lowercase , recursive=__lowercase ) def a__ ( ) -> None: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _A = None _A = None _A = threading.Lock()
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class snake_case ( _UpperCamelCase): def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int: '''simple docstring''' _A = p_stop _A = max_length def __iter__( self : Any ) -> Optional[Any]: '''simple docstring''' _A = 0 _A = False while not stop and count < self.max_length: yield count count += 1 _A = random.random() < self.p_stop class snake_case ( unittest.TestCase): def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]: '''simple docstring''' _A = [ BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ ) for i in range(2 ) ] _A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] ) self.assertListEqual(a__ , a__ ) def a_ ( self : List[Any] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ ) def a_ ( self : int ) -> int: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) def a_ ( self : List[str] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' _A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str: '''simple docstring''' random.seed(a__ ) _A = list(a__ ) _A = [ IterableDatasetShard( a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , ) for i in range(a__ ) ] _A = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(a__ ) iterable_dataset_lists.append(list(a__ ) ) _A = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _A = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(a__ ) , len(a__ ) ) self.assertTrue(len(a__ ) % shard_batch_size == 0 ) _A = [] for idx in range(0 , len(a__ ) , a__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(a__ ) < len(a__ ): reference += reference self.assertListEqual(a__ , reference[: len(a__ )] ) def a_ ( self : List[str] ) -> List[Any]: '''simple docstring''' _A = 42 _A = RandomIterableDataset() self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) # Edge case with a very small dataset _A = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> Dict: '''simple docstring''' _A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ ) _A = SkipBatchSampler(a__ , 2 ) self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Optional[int]: '''simple docstring''' _A = DataLoader(list(range(16 ) ) , batch_size=4 ) _A = skip_first_batches(a__ , 
num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _A = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def a_ ( self : int ) -> int: '''simple docstring''' Accelerator() _A = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
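# Hedged usage sketch (an addition, mirroring the tests above): resuming a
# dataloader mid-epoch with accelerate's skip_first_batches, exactly as the
# skip_first_batches test exercises it.
from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

dataloader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(dataloader, num_batches=2)
print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]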
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, 
globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device a_ = False class snake_case ( unittest.TestCase): pass @nightly @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : Optional[int] ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a__ ) _A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = generator.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = "cyberpunk 2077" _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = "A painting of a squirrel eating a burger " _A = torch.manual_seed(0 ) _A = pipe.text_to_image( prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( _UpperCamelCase): __UpperCamelCase = ['image_processor', 'tokenizer'] __UpperCamelCase = 'BridgeTowerImageProcessor' __UpperCamelCase = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : str , a__ : List[Any] , a__ : str ) -> Dict: '''simple docstring''' super().__init__(a__ , a__ ) def __call__( self : Tuple , a__ : Optional[int] , a__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a__ : bool = True , a__ : Union[bool, str, PaddingStrategy] = False , a__ : Union[bool, str, TruncationStrategy] = None , a__ : Optional[int] = None , a__ : int = 0 , a__ : Optional[int] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : Optional[Union[str, TensorType]] = None , **a__ : str , ) -> BatchEncoding: '''simple docstring''' _A = self.tokenizer( text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , ) # add pixel_values + pixel_mask _A = self.image_processor( a__ , return_tensors=a__ , do_normalize=a__ , do_center_crop=a__ , **a__ ) encoding.update(a__ ) return encoding def a_ ( self : List[Any] , *a__ : Dict , **a__ : str ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*a__ , **a__ ) def a_ ( self : Optional[int] , *a__ : Dict , **a__ : List[str] ) -> str: '''simple docstring''' return self.tokenizer.decode(*a__ , **a__ ) @property def a_ ( self : List[Any] ) -> Any: '''simple docstring''' _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a_ = logging.get_logger(__name__) @dataclass class snake_case : __UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())}) __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'}) def a_ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _A = self.task_name.lower() class snake_case ( _UpperCamelCase): __UpperCamelCase = 'train' __UpperCamelCase = 'dev' __UpperCamelCase = 'test' class snake_case ( _UpperCamelCase): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple: '''simple docstring''' warnings.warn( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , ) _A = args _A = glue_processors[args.task_name]() _A = glue_output_modes[args.task_name] if isinstance(a__ , a__ ): try: _A = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) # Load data features from cache or dataset file _A = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) _A = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) _A , _A = label_list[2], label_list[1] _A = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_A = cached_features_file + ".lock" with FileLock(a__ ): if os.path.exists(a__ ) and not args.overwrite_cache: _A = time.time() _A = torch.load(a__ ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: _A = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: _A = self.processor.get_test_examples(args.data_dir ) else: _A = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: _A = examples[:limit_length] _A = glue_convert_examples_to_features( a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , ) _A = time.time() torch.save(self.features , a__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : List[Any] ) -> Any: '''simple docstring''' return len(self.features ) def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures: '''simple docstring''' return self.features[i] def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' return self.label_list
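# Hedged usage sketch (an addition): constructing the (deprecated) GLUE
# dataset above. The data_dir path is a placeholder, and the top-level
# GlueDataset / GlueDataTrainingArguments import path follows the upstream
# transformers package.
from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC", max_seq_length=128)
dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")  # emits the FutureWarning above
print(len(dataset), dataset[0])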
621
1
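A minimal usage sketch for the dataset class above. It is not part of the original file: the "bert-base-cased" checkpoint and the ./glue_data/MRPC directory are illustrative assumptions, and the snippet presumes the GLUE task data has already been downloaded.

from transformers import AutoTokenizer

# Hypothetical example values; any downloaded GLUE task directory works the same way.
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
train_dataset = GlueDataset(args, tokenizer, mode=Split.train)
print(len(train_dataset), train_dataset.get_labels())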
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def a__ ( *__lowercase , __lowercase = None , __lowercase=True , __lowercase=2 ) -> List[str]: from .. import __version__ _A = take_from _A = () if not isinstance(args[0] , __lowercase ): _A = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__lowercase ).base_version ) >= version.parse(__lowercase ): raise ValueError( f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" f""" version {__version__} is >= {version_name}""" ) _A = None if isinstance(__lowercase , __lowercase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__lowercase ),) _A = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(__lowercase , __lowercase ): values += (getattr(__lowercase , __lowercase ),) _A = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: _A = f"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: _A = warning + " " if standard_warn else "" warnings.warn(warning + message , __lowercase , stacklevel=__lowercase ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) > 0: _A = inspect.getouterframes(inspect.currentframe() )[1] _A = call_frame.filename _A = call_frame.lineno _A = call_frame.function _A , _A = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(__lowercase ) == 0: return elif len(__lowercase ) == 1: return values[0] return values
621
"""simple docstring""" def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str: # Return True if there is node that has not iterated. _A = [False] * len(__lowercase ) _A = [] queue.append(__lowercase ) _A = True while queue: _A = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowercase ) _A = True _A = u return visited[t] def a__ ( __lowercase , __lowercase , __lowercase ) -> int: # This array is filled by BFS and to store path _A = [-1] * (len(__lowercase )) _A = 0 while bfs(__lowercase , __lowercase , __lowercase , __lowercase ): _A = float("Inf" ) _A = sink while s != source: # Find the minimum value in select path _A = min(__lowercase , graph[parent[s]][s] ) _A = parent[s] max_flow += path_flow _A = sink while v != source: _A = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _A = parent[v] return max_flow a_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] a_ , a_ = 0, 5 print(ford_fulkerson(graph, source, sink))
621
1
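For reference, the adjacency matrix above is the classic six-node example network (as in CLRS), whose maximum flow is 23. A small sanity-check sketch; it rebuilds the matrix because ford_fulkerson mutates the graph it is given:

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(test_graph, 0, 5) == 23  # expected max flow for this network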
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer a_ = logging.get_logger(__name__) a_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart a_ = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } a_ = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] __UpperCamelCase = BartTokenizer def __init__( self : Union[str, Any] , a__ : Optional[int]=None , a__ : Any=None , a__ : int=None , a__ : List[str]="replace" , a__ : Optional[int]="<s>" , a__ : List[Any]="</s>" , a__ : str="</s>" , a__ : Dict="<s>" , a__ : Any="<unk>" , a__ : Dict="<pad>" , a__ : Union[str, Any]="<mask>" , a__ : Union[str, Any]=False , a__ : Optional[int]=True , **a__ : Tuple , ) -> Union[str, Any]: '''simple docstring''' super().__init__( a__ , a__ , tokenizer_file=a__ , errors=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , trim_offsets=a__ , **a__ , ) _A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if 
pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space: _A = getattr(a__ , pre_tok_state.pop("type" ) ) _A = add_prefix_space _A = pre_tok_class(**a__ ) _A = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _A = "post_processor" _A = getattr(self.backend_tokenizer , a__ , a__ ) if tokenizer_component_instance: _A = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _A = tuple(state["sep"] ) if "cls" in state: _A = tuple(state["cls"] ) _A = False if state.get("add_prefix_space" , a__ ) != add_prefix_space: _A = add_prefix_space _A = True if state.get("trim_offsets" , a__ ) != trim_offsets: _A = trim_offsets _A = True if changes_to_apply: _A = getattr(a__ , state.pop("type" ) ) _A = component_class(**a__ ) setattr(self.backend_tokenizer , a__ , a__ ) @property def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def a_ ( self : Tuple , a__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _A = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else value _A = value def a_ ( self : int , *a__ : List[str] , **a__ : List[str] ) -> BatchEncoding: '''simple docstring''' _A = kwargs.get("is_split_into_words" , a__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a__ , **a__ ) def a_ ( self : Optional[Any] , *a__ : Tuple , **a__ : Dict ) -> BatchEncoding: '''simple docstring''' _A = kwargs.get("is_split_into_words" , a__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*a__ , **a__ ) def a_ ( self : Tuple , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' _A = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def a_ ( self : Any , a__ : Dict , a__ : Dict=None ) -> str: '''simple docstring''' _A = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self : str , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
621
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) a_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", 
"decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]: _A = state_dict.pop(__lowercase ) _A = val def a__ ( __lowercase ) -> List[str]: _A = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _A = value else: _A = value return new_state_dict def a__ ( __lowercase , __lowercase=False ) -> Any: _A = "" if is_panoptic: _A = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:256, :] _A = in_proj_bias[:256] _A = in_proj_weight[256:512, :] _A = in_proj_bias[256:512] _A = in_proj_weight[-256:, :] _A = in_proj_bias[-256:] def a__ ( ) -> int: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def a__ ( __lowercase , __lowercase ) -> Any: _A = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _A = "resnet101" if "dc5" in model_name: _A = True _A = "panoptic" in model_name if is_panoptic: _A = 250 else: _A = 91 _A = "huggingface/label-files" _A = "coco-detection-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load image processor _A = "coco_panoptic" if is_panoptic else "coco_detection" _A = ConditionalDetrImageProcessor(format=__lowercase ) # prepare image _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ) _A = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub _A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval() _A = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _A = "conditional_detr." + src rename_key(__lowercase , __lowercase , __lowercase ) _A = rename_backbone_keys(__lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowercase , is_panoptic=__lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _A = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): _A = state_dict.pop(__lowercase ) _A = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _A = state_dict.pop(__lowercase ) _A = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: _A = state_dict.pop(__lowercase ) _A = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _A = state_dict.pop(__lowercase ) _A = val # finally, create HuggingFace model and load state dict _A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase ) model.load_state_dict(__lowercase ) model.eval() model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion _A = conditional_detr(__lowercase ) _A = model(__lowercase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) a_ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
621
1
"""simple docstring""" import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a_ = argparse.ArgumentParser( description=( "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") a_ = parser.parse_args() if args.model_type == "roberta": a_ = RobertaForMaskedLM.from_pretrained(args.model_name) a_ = "roberta" elif args.model_type == "gpt2": a_ = GPTaLMHeadModel.from_pretrained(args.model_name) a_ = "transformer" a_ = model.state_dict() a_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a_ = f'''{prefix}.embeddings.{w}.weight''' a_ = state_dict[param_name] for w in ["weight", "bias"]: a_ = f'''{prefix}.embeddings.LayerNorm.{w}''' a_ = state_dict[param_name] # Transformer Blocks # a_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: a_ = state_dict[f'''lm_head.dense.{w}'''] a_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a_ = state_dict[f'''{prefix}.ln_f.{w}'''] a_ = state_dict["lm_head.weight"] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
621
"""simple docstring""" import random def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = a[left_index] _A = left_index + 1 for j in range(left_index + 1 , __lowercase ): if a[j] < pivot: _A , _A = a[i], a[j] i += 1 _A , _A = a[i - 1], a[left_index] return i - 1 def a__ ( __lowercase , __lowercase , __lowercase ) -> int: if left < right: _A = random.randint(__lowercase , right - 1 ) _A , _A = ( a[left], a[pivot], ) # switches the pivot with the left most bound _A = partition(__lowercase , __lowercase , __lowercase ) quick_sort_random( __lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point quick_sort_random( __lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point def a__ ( ) -> Dict: _A = input("Enter numbers separated by a comma:\n" ).strip() _A = [int(__lowercase ) for item in user_input.split("," )] quick_sort_random(__lowercase , 0 , len(__lowercase ) ) print(__lowercase ) if __name__ == "__main__": main()
621
1
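A non-interactive sketch of the sorter above, useful as a quick check (note that the right bound is exclusive):

data = [5, 1, 4, 2, 3]
quick_sort_random(data, 0, len(data))  # sorts in place
assert data == [1, 2, 3, 4, 5]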
"""simple docstring""" from __future__ import annotations def a__ ( __lowercase , __lowercase ) -> float: _A = sorted(numsa + numsa ) _A , _A = divmod(len(__lowercase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() a_ = [float(x) for x in input("Enter the elements of first array: ").split()] a_ = [float(x) for x in input("Enter the elements of second array: ").split()] print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
621
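Two worked examples for the function above, covering the odd and even total-length branches:

assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # odd count: the single middle element
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even count: mean of the two middle elements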
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): __UpperCamelCase = ['input_features'] def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str: '''simple docstring''' super().__init__( feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , ) _A = n_fft _A = hop_length _A = chunk_length _A = chunk_length * sampling_rate _A = self.n_samples // hop_length _A = sampling_rate _A = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : int , a__ : np.array ) -> np.ndarray: '''simple docstring''' _A = spectrogram( a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) _A = log_spec[:, :-1] _A = np.maximum(a__ , log_spec.max() - 8.0 ) _A = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: _A = np.array(a__ , np.intaa ) _A = [] for vector, length in zip(a__ , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(a__ ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) _A = is_batched_numpy or ( isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(a__ , np.ndarray ): _A = np.asarray(a__ , dtype=np.floataa ) elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A = [np.asarray([raw_speech] ).T] _A = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding _A = self.pad( a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _A = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) _A = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format _A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) _A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]] if isinstance(input_features[0] , a__ ): _A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features] else: _A = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _A = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: _A = padded_inputs.convert_to_tensors(a__ ) return padded_inputs def a_ ( self : Dict ) -> Dict[str, Any]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
621
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def a__ ( __lowercase ) -> List[str]: # initialize config if "resnet-50" in model_name: _A = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: _A = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) _A = DetrConfig(use_timm_backbone=__lowercase , backbone_config=__lowercase ) # set label attributes _A = "panoptic" in model_name if is_panoptic: _A = 250 else: _A = 91 _A = "huggingface/label-files" _A = "coco-detection-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} return config, is_panoptic def a__ ( __lowercase ) -> str: # here we list all keys to be renamed (original name on the left, our name on the right) _A = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", 
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( 
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def a__ ( __lowercase , __lowercase , __lowercase ) -> Tuple: _A = state_dict.pop(__lowercase ) _A = val def a__ ( __lowercase , __lowercase=False ) -> Optional[int]: _A = "" if is_panoptic: _A = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:256, :] _A = in_proj_bias[:256] _A = in_proj_weight[256:512, :] _A = in_proj_bias[256:512] _A = in_proj_weight[-256:, :] _A = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _A = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:256, :] _A = in_proj_bias[:256] _A = in_proj_weight[256:512, :] _A = in_proj_bias[256:512] _A = in_proj_weight[-256:, :] _A = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _A = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _A = in_proj_weight_cross_attn[:256, :] _A = in_proj_bias_cross_attn[:256] _A = in_proj_weight_cross_attn[256:512, :] _A = in_proj_bias_cross_attn[256:512] _A = in_proj_weight_cross_attn[-256:, :] _A = in_proj_bias_cross_attn[-256:] def a__ ( ) -> Optional[Any]: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase 
).raw ) return im @torch.no_grad() def a__ ( __lowercase , __lowercase=None , __lowercase=False ) -> List[str]: _A , _A = get_detr_config(__lowercase ) # load original model from torch hub _A = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(f"""Converting model {model_name}...""" ) _A = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=__lowercase ).eval() _A = detr.state_dict() # rename keys for src, dest in create_rename_keys(__lowercase ): if is_panoptic: _A = "detr." + src rename_key(__lowercase , __lowercase , __lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowercase , is_panoptic=__lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _A = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): _A = state_dict.pop(__lowercase ) _A = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _A = state_dict.pop(__lowercase ) _A = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: _A = state_dict.pop(__lowercase ) _A = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _A = state_dict.pop(__lowercase ) _A = val # finally, create HuggingFace model and load state dict _A = DetrForSegmentation(__lowercase ) if is_panoptic else DetrForObjectDetection(__lowercase ) model.load_state_dict(__lowercase ) model.eval() # verify our conversion on an image _A = "coco_panoptic" if is_panoptic else "coco_detection" _A = DetrImageProcessor(format=__lowercase ) _A = processor(images=prepare_img() , return_tensors="pt" ) _A = encoding["pixel_values"] _A = detr(__lowercase ) _A = model(__lowercase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) processor.save_pretrained(__lowercase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(f"""nielsr/{model_name}""" ) processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") a_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
621
"""simple docstring""" from __future__ import annotations def a__ ( __lowercase , __lowercase ) -> float: _A = sorted(numsa + numsa ) _A , _A = divmod(len(__lowercase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() a_ = [float(x) for x in input("Enter the elements of first array: ").split()] a_ = [float(x) for x in input("Enter the elements of second array: ").split()] print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
621
1
"""simple docstring""" import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def a__ ( __lowercase , __lowercase , __lowercase=[] ) -> Optional[int]: _A = size[0] - overlap_pixels * 2 _A = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels _A = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 _A = np.pad(__lowercase , mode="linear_ramp" , pad_width=__lowercase , end_values=0 ) if "l" in remove_borders: _A = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: _A = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: _A = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: _A = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: return max(__lowercase , min(__lowercase , __lowercase ) ) def a__ ( __lowercase , __lowercase , __lowercase ) -> str: return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def a__ ( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]: _A = list(__lowercase ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap _A = clamp_rect(__lowercase , [0, 0] , [image_size[0], image_size[1]] ) return rect def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: _A = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__lowercase , (original_slice, 0) ) return result def a__ ( __lowercase , __lowercase ) -> Any: _A = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) _A = tile.crop(__lowercase ) return tile def a__ ( __lowercase , __lowercase ) -> List[str]: _A = n % d return n - divisor class snake_case ( _UpperCamelCase): def __init__( self : Union[str, Any] , a__ : AutoencoderKL , a__ : CLIPTextModel , a__ : CLIPTokenizer , a__ : UNetaDConditionModel , a__ : DDPMScheduler , a__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a__ : int = 3_50 , ) -> Tuple: '''simple docstring''' super().__init__( vae=a__ , text_encoder=a__ , tokenizer=a__ , unet=a__ , low_res_scheduler=a__ , scheduler=a__ , max_noise_level=a__ , ) def a_ ( self : Tuple , a__ : Dict , a__ : int , a__ : List[Any] , a__ : Optional[int] , a__ : Tuple , a__ : int , a__ : List[Any] , **a__ : str ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) _A = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) _A = add_overlap_rect(a__ , a__ , image.size ) _A = image.crop(a__ ) _A = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] _A = translated_slice_x - (original_image_slice / 2) _A = max(0 , a__ ) _A = 
squeeze_tile(a__ , a__ , a__ , a__ ) _A = to_input.size _A = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) _A = super(a__ , self ).__call__(image=a__ , **a__ ).images[0] _A = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) _A = unsqueeze_tile(a__ , a__ ) _A = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) _A = [] if x == 0: remove_borders.append("l" ) elif crop_rect[2] == image.size[0]: remove_borders.append("r" ) if y == 0: remove_borders.append("t" ) elif crop_rect[3] == image.size[1]: remove_borders.append("b" ) _A = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=a__ ) , mode="L" , ) final_image.paste( a__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , a__ ) @torch.no_grad() def __call__( self : List[Any] , a__ : Union[str, List[str]] , a__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , a__ : int = 75 , a__ : float = 9.0 , a__ : int = 50 , a__ : Optional[Union[str, List[str]]] = None , a__ : Optional[int] = 1 , a__ : float = 0.0 , a__ : Optional[torch.Generator] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , a__ : int = 1_28 , a__ : int = 32 , a__ : int = 32 , ) -> List[str]: '''simple docstring''' _A = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) ) _A = math.ceil(image.size[0] / tile_size ) _A = math.ceil(image.size[1] / tile_size ) _A = tcx * tcy _A = 0 for y in range(a__ ): for x in range(a__ ): self._process_tile( a__ , a__ , a__ , a__ , a__ , a__ , a__ , prompt=a__ , num_inference_steps=a__ , guidance_scale=a__ , noise_level=a__ , negative_prompt=a__ , num_images_per_prompt=a__ , eta=a__ , generator=a__ , latents=a__ , ) current_count += 1 if callback is not None: callback({"progress": current_count / total_tile_count, "image": final_image} ) return final_image def a__ ( ) -> Union[str, Any]: # Run a demo _A = "stabilityai/stable-diffusion-x4-upscaler" _A = StableDiffusionTiledUpscalePipeline.from_pretrained(__lowercase , revision="fp16" , torch_dtype=torch.floataa ) _A = pipe.to("cuda" ) _A = Image.open("../../docs/source/imgs/diffusers_library.jpg" ) def callback(__lowercase ): print(f"""progress: {obj['progress']:.4f}""" ) obj["image"].save("diffusers_library_progress.jpg" ) _A = pipe(image=__lowercase , prompt="Black font, white background, vector" , noise_level=40 , callback=__lowercase ) final_image.save("diffusers_library.jpg" ) if __name__ == "__main__": main()
621
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", "Salesforce/blip-vqa-capfit-large": ( "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json" ), "Salesforce/blip-image-captioning-base": ( "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json" ), "Salesforce/blip-image-captioning-large": ( "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json" ), "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", "Salesforce/blip-itm-large-flikr": ( "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json" ), } class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip_text_model' def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , ) _A = vocab_size _A = hidden_size _A = encoder_hidden_size _A = intermediate_size _A = projection_dim _A = hidden_dropout_prob _A = num_hidden_layers _A = num_attention_heads _A = max_position_embeddings _A = layer_norm_eps _A = hidden_act _A = initializer_range _A = attention_probs_dropout_prob _A = is_decoder _A = use_cache @classmethod def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(a__ ) _A , _A = cls.get_config_dict(a__ , **a__ ) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _A = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a__ , **a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip_vision_model' def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]: '''simple docstring''' super().__init__(**a__ ) _A = hidden_size _A = intermediate_size _A = projection_dim _A = num_hidden_layers _A = num_attention_heads _A = patch_size _A = image_size _A = initializer_range _A = attention_dropout _A = layer_norm_eps _A = hidden_act @classmethod def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(a__ ) _A , _A = cls.get_config_dict(a__ , **a__ ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _A = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a__ , **a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = 'blip' __UpperCamelCase = True def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict: '''simple docstring''' super().__init__(**a__ ) if text_config is None: _A = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." ) if vision_config is None: _A = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." ) _A = BlipTextConfig(**a__ ) _A = BlipVisionConfig(**a__ ) _A = self.vision_config.hidden_size _A = projection_dim _A = logit_scale_init_value _A = 1.0 _A = 0.0_2 _A = image_text_hidden_size @classmethod def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ ) def a_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _A = copy.deepcopy(self.__dict__ ) _A = self.text_config.to_dict() _A = self.vision_config.to_dict() _A = self.__class__.model_type return output
621
1
"""simple docstring""" from __future__ import annotations def a__ ( __lowercase ) -> float: _A = 0.00 _A = 0 for resistor in resistors: if resistor <= 0: _A = f"""Resistor at index {index} has a negative or zero value!""" raise ValueError(__lowercase ) first_sum += 1 / float(__lowercase ) index += 1 return 1 / first_sum def a__ ( __lowercase ) -> float: _A = 0.00 _A = 0 for resistor in resistors: sum_r += resistor if resistor < 0: _A = f"""Resistor at index {index} has a negative value!""" raise ValueError(__lowercase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
621
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class snake_case ( unittest.TestCase , _UpperCamelCase): def a_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _A = load_tool("text-classification" ) self.tool.setup() _A = load_tool("text-classification" , remote=a__ ) def a_ ( self : Optional[int] ) -> Dict: '''simple docstring''' _A = self.tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Optional[Any] ) -> Dict: '''simple docstring''' _A = self.remote_tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' _A = self.tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(a__ , "positive" ) def a_ ( self : Dict ) -> Any: '''simple docstring''' _A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(a__ , "positive" )
621
1
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( _UpperCamelCase): __UpperCamelCase = ['image_processor', 'tokenizer'] __UpperCamelCase = 'BlipImageProcessor' __UpperCamelCase = 'AutoTokenizer' def __init__( self : int , a__ : int , a__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = False super().__init__(a__ , a__ ) _A = self.image_processor def __call__( self : List[str] , a__ : ImageInput = None , a__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a__ : bool = True , a__ : Union[bool, str, PaddingStrategy] = False , a__ : Union[bool, str, TruncationStrategy] = None , a__ : Optional[int] = None , a__ : int = 0 , a__ : Optional[int] = None , a__ : Optional[bool] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : Optional[Union[str, TensorType]] = None , **a__ : Union[str, Any] , ) -> BatchEncoding: '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: _A = self.tokenizer _A = self.tokenizer( text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_token_type_ids=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , ) return text_encoding # add pixel_values _A = self.image_processor(a__ , return_tensors=a__ ) if text is not None: _A = self.tokenizer( text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_token_type_ids=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , ) else: _A = None if text_encoding is not None: encoding_image_processor.update(a__ ) return encoding_image_processor def a_ ( self : Tuple , *a__ : int , **a__ : Tuple ) -> Any: '''simple docstring''' return self.tokenizer.batch_decode(*a__ , **a__ ) def a_ ( self : Any , *a__ : List[str] , **a__ : Optional[Any] ) -> Any: '''simple docstring''' return self.tokenizer.decode(*a__ , **a__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
621
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = StableDiffusionInpaintPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __UpperCamelCase = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCamelCase = frozenset([]) def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , ) _A = PNDMScheduler(skip_prk_steps=a__ ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _A = CLIPTextModel(a__ ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int: '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ ) _A = image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) ) _A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) ) if str(a__ ).startswith("mps" ): _A = torch.manual_seed(a__ ) else: _A = torch.Generator(device=a__ ).manual_seed(a__ ) _A = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def a_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = StableDiffusionInpaintPipeline(**a__ ) _A = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _A = self.get_dummy_inputs(a__ ) _A = sd_pipe(**a__ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array([0.4_7_2_7, 
0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a_ ( self : str ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : List[Any] ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = StableDiffusionInpaintPipeline.from_pretrained( a__ , torch_dtype=torch.floataa , safety_checker=a__ , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _A = "stabilityai/stable-diffusion-2-inpainting" _A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" ) _A = StableDiffusionInpaintPipeline.from_pretrained( a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _A = "Face of a yellow cat, high resolution, sitting on a park bench" _A = torch.manual_seed(0 ) _A = pipe( prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ) _A = torch.cuda.max_memory_allocated() 
# make sure that less than 2.65 GB is allocated assert mem_bytes < 2.6_5 * 10**9
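Condensed from the first slow test above (model id, prompt and fixture URLs are verbatim from the test; the CUDA device and fp16 dtype are the test's setup, not requirements):

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("inpainted.png")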
621
1
"""simple docstring""" from __future__ import annotations a_ = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } class snake_case : def __init__( self : Optional[int] , a__ : dict[str, list[str]] , a__ : str ) -> None: '''simple docstring''' _A = graph # mapping node to its parent in resulting breadth first tree _A = {} _A = source_vertex def a_ ( self : List[str] ) -> None: '''simple docstring''' _A = {self.source_vertex} _A = None _A = [self.source_vertex] # first in first out queue while queue: _A = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(a__ ) _A = vertex queue.append(a__ ) def a_ ( self : List[Any] , a__ : str ) -> str: '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex _A = self.parent.get(a__ ) if target_vertex_parent is None: _A = ( F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}""" ) raise ValueError(a__ ) return self.shortest_path(a__ ) + F"""->{target_vertex}""" if __name__ == "__main__": a_ = Graph(graph, "G") g.breath_first_search() print(g.shortest_path("D")) print(g.shortest_path("G")) print(g.shortest_path("Foo"))
621
"""simple docstring""" def a__ ( __lowercase , __lowercase ) -> int: while a != 0: _A , _A = b % a, a return b def a__ ( __lowercase , __lowercase ) -> int: if gcd(__lowercase , __lowercase ) != 1: _A = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(__lowercase ) _A , _A , _A = 1, 0, a _A , _A , _A = 0, 1, m while va != 0: _A = ua // va _A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
621
1
"""simple docstring""" import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case ( unittest.TestCase): def a_ ( self : Optional[Any] , a__ : List[Any] , a__ : int ) -> Optional[Any]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={'_'.join([str(a__ ) for s in shape] )}.npy""" def a_ ( self : Tuple ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() def a_ ( self : Tuple , a__ : Dict=0 , a__ : List[str]=(4, 4, 64, 64) , a__ : Union[str, Any]=False ) -> List[Any]: '''simple docstring''' _A = jnp.bfloataa if fpaa else jnp.floataa _A = jnp.array(load_hf_numpy(self.get_file_format(a__ , a__ ) ) , dtype=a__ ) return image def a_ ( self : List[str] , a__ : Tuple=False , a__ : Any="CompVis/stable-diffusion-v1-4" ) -> str: '''simple docstring''' _A = jnp.bfloataa if fpaa else jnp.floataa _A = "bf16" if fpaa else None _A , _A = FlaxUNetaDConditionModel.from_pretrained( a__ , subfolder="unet" , dtype=a__ , revision=a__ ) return model, params def a_ ( self : List[str] , a__ : Any=0 , a__ : int=(4, 77, 7_68) , a__ : int=False ) -> Any: '''simple docstring''' _A = jnp.bfloataa if fpaa else jnp.floataa _A = jnp.array(load_hf_numpy(self.get_file_format(a__ , a__ ) ) , dtype=a__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def a_ ( self : Optional[Any] , a__ : Tuple , a__ : List[str] , a__ : List[str] ) -> Optional[int]: '''simple docstring''' _A , _A = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=a__ ) _A = self.get_latents(a__ , fpaa=a__ ) _A = self.get_encoder_hidden_states(a__ , fpaa=a__ ) _A = model.apply( {"params": params} , a__ , jnp.array(a__ , dtype=jnp.intaa ) , encoder_hidden_states=a__ , ).sample assert sample.shape == latents.shape _A = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _A = jnp.array(a__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(a__ , a__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 10_00, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def a_ ( self : Optional[int] , a__ : List[Any] , a__ : Tuple , a__ : List[Any] ) -> str: '''simple docstring''' _A , _A = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=a__ ) _A = self.get_latents(a__ , shape=(4, 4, 96, 96) , fpaa=a__ ) _A = self.get_encoder_hidden_states(a__ , shape=(4, 77, 10_24) , fpaa=a__ ) _A = model.apply( {"params": 
params} , a__ , jnp.array(a__ , dtype=jnp.intaa ) , encoder_hidden_states=a__ , ).sample assert sample.shape == latents.shape _A = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _A = jnp.array(a__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(a__ , a__ , atol=1E-2 )
621
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class snake_case ( _UpperCamelCase): def __init__( self : List[Any] , a__ : Any ) -> Any: '''simple docstring''' _A = data def __iter__( self : List[str] ) -> str: '''simple docstring''' for element in self.data: yield element def a__ ( __lowercase=True ) -> Tuple: _A = Accelerator(even_batches=__lowercase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]: if iterable: _A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) ) else: _A = TensorDataset(torch.as_tensor(range(__lowercase ) ) ) _A = DataLoader(__lowercase , batch_size=__lowercase ) _A = accelerator.prepare(__lowercase ) return dl def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict: _A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase ) _A = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def a__ ( ) -> List[str]: _A = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def a__ ( ) -> List[Any]: _A = create_accelerator(even_batches=__lowercase ) verify_dataloader_batch_sizes( __lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def a__ ( ) -> int: _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__lowercase ): _A = ddp_model(batch[0].float() ) _A = output.sum() loss.backward() batch_idxs.append(__lowercase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def a__ ( 
__lowercase ) -> List[str]: with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for multi-GPU" in str(w[-1].message ) def a__ ( ) -> Tuple: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = train_dl.batch_sampler.even_batches _A = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> int: _A = True _A = False _A = create_accelerator(even_batches=__lowercase ) _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) _A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): _A = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def a__ ( ) -> Optional[Any]: _A = create_accelerator() _A = torch.nn.Linear(1 , 1 ) _A = accelerator.prepare(__lowercase ) create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase ) with warnings.catch_warnings(record=__lowercase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ): pass assert issubclass(w[-1].category , __lowercase ) assert "only supported for map-style datasets" in str(w[-1].message ) def a__ ( ) -> Optional[Any]: _A = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) _A = accelerator.state.distributed_type _A = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__lowercase ) _A = original_state if __name__ == "__main__": main()
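Condensed version of the even_batches behaviour these tests pin down; a sketch that assumes a two-process launch (e.g. accelerate launch --num_processes 2 script.py):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)
dl = accelerator.prepare(DataLoader(TensorDataset(torch.arange(3)), batch_size=1))
sizes = [len(batch[0]) for batch in dl]
# process 0 sees sizes [1, 1] and process 1 sees [1]; with even_batches=True
# (the default) both would see [1, 1] because the shorter shard is padded.
print(accelerator.process_index, sizes)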
621
1
"""simple docstring""" # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar a_ = TypeVar("T") class snake_case ( Generic[T]): def __init__( self : Dict , a__ : bool = True ) -> None: '''simple docstring''' _A = {} # dictionary of lists _A = directed def a_ ( self : int , a__ : T , a__ : T ) -> GraphAdjacencyList[T]: '''simple docstring''' if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(a__ ) self.adj_list[destination_vertex].append(a__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(a__ ) _A = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(a__ ) _A = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _A = [destination_vertex] _A = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(a__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(a__ ) _A = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _A = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _A = [destination_vertex] _A = [] return self def __repr__( self : int ) -> str: '''simple docstring''' return pformat(self.adj_list )
621
"""simple docstring""" class snake_case : def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]: '''simple docstring''' _A = None _A = None _A = graph self._normalize_graph(a__ , a__ ) _A = len(a__ ) _A = None def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict: '''simple docstring''' if sources is int: _A = [sources] if sinks is int: _A = [sinks] if len(a__ ) == 0 or len(a__ ) == 0: return _A = sources[0] _A = sinks[0] # make fake vertex if there are more # than one source or sink if len(a__ ) > 1 or len(a__ ) > 1: _A = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _A = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _A = max_input_flow _A = 0 _A = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _A = max_input_flow _A = size - 1 def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str: '''simple docstring''' _A = algorithm(self ) class snake_case : def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]: '''simple docstring''' _A = flow_network _A = flow_network.verticesCount _A = flow_network.sourceIndex _A = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _A = flow_network.graph _A = False def a_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' if not self.executed: self._algorithm() _A = True def a_ ( self : Any ) -> int: '''simple docstring''' pass class snake_case ( _UpperCamelCase): def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]: '''simple docstring''' super().__init__(a__ ) # use this to save your result _A = -1 def a_ ( self : Any ) -> List[str]: '''simple docstring''' if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class snake_case ( _UpperCamelCase): def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict: '''simple docstring''' super().__init__(a__ ) _A = [[0] * self.verticies_count for i in range(self.verticies_count )] _A = [0] * self.verticies_count _A = [0] * self.verticies_count def a_ ( self : Any ) -> Dict: '''simple docstring''' _A = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _A = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _A = 0 while i < len(a__ ): _A = vertices_list[i] _A = self.heights[vertex_index] self.process_vertex(a__ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(a__ ) ) _A = 0 else: i += 1 _A = sum(self.preflow[self.source_index] ) def a_ ( self : Dict , a__ : Any ) -> Optional[int]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(a__ , a__ ) self.relabel(a__ ) def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]: '''simple docstring''' _A = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def a_ ( self : Any , a__ : Dict ) -> Any: '''simple docstring''' _A = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _A = self.heights[to_index] if min_height is not None: _A = min_height + 1 if __name__ == "__main__": a_ = [0] a_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network a_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate a_ = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
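An independent cross-check of the demo graph with a minimal Edmonds-Karp pass: the only source-to-sink route is 0 -> 1 -> 2 -> 3 with capacities 7, 6, 8, so the printed maximum flow should be min(7, 6, 8) = 6.

from collections import deque

def max_flow(capacity: list[list[int]], source: int, sink: int) -> int:
    n = len(capacity)
    residual = [row[:] for row in capacity]
    flow = 0
    while True:
        # breadth-first search for an augmenting path in the residual graph
        parent = {source: None}
        queue = deque([source])
        while queue and sink not in parent:
            u = queue.popleft()
            for v in range(n):
                if v not in parent and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if sink not in parent:
            return flow
        # walk the path backwards to find its bottleneck, then update residuals
        path = []
        v = sink
        while parent[v] is not None:
            path.append((parent[v], v))
            v = parent[v]
        bottleneck = min(residual[u][v] for u, v in path)
        for u, v in path:
            residual[u][v] -= bottleneck
            residual[v][u] += bottleneck
        flow += bottleneck

print(max_flow([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6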
621
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["YolosFeatureExtractor"] a_ = ["YolosImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, 
globals()["__file__"], _import_structure, module_spec=__spec__)
621
1
"""simple docstring""" def a__ ( __lowercase , __lowercase ) -> int: return abs(__lowercase ) if a == 0 else greatest_common_divisor(b % a , __lowercase ) def a__ ( __lowercase , __lowercase ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. _A , _A = y, x % y return abs(__lowercase ) def a__ ( ) -> Tuple: try: _A = input("Enter two integers separated by comma (,): " ).split("," ) _A = int(nums[0] ) _A = int(nums[1] ) print( f"""greatest_common_divisor({num_a}, {num_a}) = """ f"""{greatest_common_divisor(__lowercase , __lowercase )}""" ) print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowercase , __lowercase )}""" ) except (IndexError, UnboundLocalError, ValueError): print("Wrong input" ) if __name__ == "__main__": main()
621
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class snake_case ( _UpperCamelCase): def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None: '''simple docstring''' warnings.warn( "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use GLPNImageProcessor instead." , a__ , ) super().__init__(*a__ , **a__ )
621
1
"""simple docstring""" a_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def a__ ( ) -> None: _A = input("Enter message: " ) _A = input("Enter key [alphanumeric]: " ) _A = input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): _A = "encrypt" _A = encrypt_message(__lowercase , __lowercase ) elif mode.lower().startswith("d" ): _A = "decrypt" _A = decrypt_message(__lowercase , __lowercase ) print(f"""\n{mode.title()}ed message:""" ) print(__lowercase ) def a__ ( __lowercase , __lowercase ) -> str: return translate_message(__lowercase , __lowercase , "encrypt" ) def a__ ( __lowercase , __lowercase ) -> str: return translate_message(__lowercase , __lowercase , "decrypt" ) def a__ ( __lowercase , __lowercase , __lowercase ) -> str: _A = [] _A = 0 _A = key.upper() for symbol in message: _A = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__lowercase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__lowercase ): _A = 0 else: translated.append(__lowercase ) return "".join(__lowercase ) if __name__ == "__main__": main()
621
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def a__ ( __lowercase ) -> Optional[int]: _A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def a__ ( __lowercase ) -> List[Any]: _A , _A = emb.weight.shape _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _A = emb.weight.data return lin_layer def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]: _A = torch.load(__lowercase , map_location="cpu" )["model"] remove_ignore_keys_(__lowercase ) _A = state_dict["encoder.embed_tokens.weight"].shape[0] _A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: _A = "relu" _A = state_dict["decoder.embed_tokens.weight"] _A = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: _A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") a_ = parser.parse_args() a_ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
621
1
"""simple docstring""" import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration a_ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def a__ ( __lowercase ) -> Dict: _A = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) a_ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def a__ ( __lowercase ) -> Optional[Any]: _A = list(s_dict.keys() ) for key in keys: _A = key for k, v in WHISPER_MAPPING.items(): if k in key: _A = new_key.replace(__lowercase , __lowercase ) print(f"""{key} -> {new_key}""" ) _A = s_dict.pop(__lowercase ) return s_dict def a__ ( __lowercase ) -> int: _A , _A = emb.weight.shape _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _A = emb.weight.data return lin_layer def a__ ( __lowercase , __lowercase ) -> bytes: os.makedirs(__lowercase , exist_ok=__lowercase ) _A = os.path.basename(__lowercase ) _A = url.split("/" )[-2] _A = os.path.join(__lowercase , __lowercase ) if os.path.exists(__lowercase ) and not os.path.isfile(__lowercase ): raise RuntimeError(f"""{download_target} exists and is not a regular file""" ) if os.path.isfile(__lowercase ): _A = open(__lowercase , "rb" ).read() if hashlib.shaaaa(__lowercase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"""{download_target} exists, but the SHA256 
checksum does not match; re-downloading the file""" ) with urllib.request.urlopen(__lowercase ) as source, open(__lowercase , "wb" ) as output: with tqdm( total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=__lowercase , unit_divisor=1024 ) as loop: while True: _A = source.read(8192 ) if not buffer: break output.write(__lowercase ) loop.update(len(__lowercase ) ) _A = open(__lowercase , "rb" ).read() if hashlib.shaaaa(__lowercase ).hexdigest() != expected_shaaaa: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return model_bytes def a__ ( __lowercase , __lowercase ) -> int: if ".pt" not in checkpoint_path: _A = _download(_MODELS[checkpoint_path] ) else: _A = torch.load(__lowercase , map_location="cpu" ) _A = original_checkpoint["dims"] _A = original_checkpoint["model_state_dict"] _A = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(__lowercase ) rename_keys(__lowercase ) _A = True _A = state_dict["decoder.layers.0.fc1.weight"].shape[0] _A = WhisperConfig( vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=__lowercase , decoder_ffn_dim=__lowercase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , ) _A = WhisperForConditionalGeneration(__lowercase ) _A , _A = model.model.load_state_dict(__lowercase , strict=__lowercase ) if len(__lowercase ) > 0 and not set(__lowercase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f""" but all the following weights are missing {missing}""" ) if tie_embeds: _A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _A = proj_out_weights model.save_pretrained(__lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") a_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
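One thing to flag in the config construction above: decoder_attention_heads is read from dimensions["n_text_state"], while the upstream conversion script reads the head count from n_text_head (for the tiny model, n_text_state is 384, which cannot be a head count). Separately, every checkpoint named in _MODELS is also published in converted form, so the conversion can often be skipped:

from transformers import WhisperForConditionalGeneration

model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")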
621
"""simple docstring""" import numpy as np def a__ ( __lowercase , __lowercase ) -> np.ndarray: return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
621
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"], "tokenization_roberta": ["RobertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["RobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", "RobertaForMaskedLM", "RobertaForMultipleChoice", "RobertaForQuestionAnswering", "RobertaForSequenceClassification", "RobertaForTokenClassification", "RobertaModel", "RobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForCausalLM", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", "TFRobertaForQuestionAnswering", "TFRobertaForSequenceClassification", "TFRobertaForTokenClassification", "TFRobertaMainLayer", "TFRobertaModel", "TFRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "FlaxRobertaForCausalLM", "FlaxRobertaForMaskedLM", "FlaxRobertaForMultipleChoice", "FlaxRobertaForQuestionAnswering", "FlaxRobertaForSequenceClassification", "FlaxRobertaForTokenClassification", "FlaxRobertaModel", "FlaxRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
621
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging a_ = logging.get_logger(__name__) a_ = {"vocab_file": "spiece.model"} a_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 a_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } a_ = "▁" class snake_case ( _UpperCamelCase): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None: '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: _A = [F"""<extra_id_{i}>""" for i in range(a__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" ) _A = legacy _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , ) _A = vocab_file _A = extra_ids _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a__ ) @staticmethod def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple: '''simple docstring''' if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , ) return max_model_length @property def a_ ( self : List[Any] ) -> Dict: '''simple docstring''' return self.sp_model.get_piece_size() + self._extra_ids def a_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(a__ )) + [1] return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1] def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' return list( set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) ) def a_ ( self : str ) -> List[Any]: '''simple docstring''' return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()] def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]: '''simple docstring''' if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated""" " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _A = self._add_eos_if_not_present(a__ ) if token_ids_a is None: return token_ids_a else: _A = self._add_eos_if_not_present(a__ ) return token_ids_a + token_ids_a def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _A = self.__dict__.copy() _A = None return state def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _A = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]: '''simple docstring''' if not self.legacy: _A = SPIECE_UNDERLINE + text.replace(a__ , " " ) return super().tokenize(a__ , **a__ ) def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any: '''simple docstring''' if not self.legacy: _A = text.startswith(a__ ) if is_first: _A = text[1:] _A = self.sp_model.encode(a__ , out_type=a__ ) if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ): _A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def a_ ( self : int , a__ : List[Any] ) -> List[str]: '''simple docstring''' if token.startswith("<extra_id_" ): _A = re.match(r"<extra_id_(\d+)>" , a__ ) _A = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(a__ ) def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any: '''simple docstring''' if index < self.sp_model.get_piece_size(): _A = self.sp_model.IdToPiece(a__ ) else: _A = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]: '''simple docstring''' _A = [] _A = "" _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a__ ) + token _A = True _A = [] else: current_sub_tokens.append(a__ ) _A = False out_string += self.sp_model.decode(a__ ) return out_string.strip() def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(a__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a__ ) elif not os.path.isfile(self.vocab_file ): with open(a__ , "wb" ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(a__ ) return (out_vocab_file,)
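
A hedged usage sketch, added editorially and not part of the file above. Because the style-transfer naming collapses every method to `a_`, the calls below assume the upstream public method names survive through the base class (an assumption), and "spiece.model" is a placeholder path:

    tok = snake_case("spiece.model")  # `snake_case` is the literal class name in this file
    ids = tok("Translate English to German: Hello.")["input_ids"]
    assert ids[-1] == tok.eos_token_id  # appended by the eos helper method above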
"""simple docstring""" import random def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: _A = a[left_index] _A = left_index + 1 for j in range(left_index + 1 , __lowercase ): if a[j] < pivot: _A , _A = a[i], a[j] i += 1 _A , _A = a[i - 1], a[left_index] return i - 1 def a__ ( __lowercase , __lowercase , __lowercase ) -> int: if left < right: _A = random.randint(__lowercase , right - 1 ) _A , _A = ( a[left], a[pivot], ) # switches the pivot with the left most bound _A = partition(__lowercase , __lowercase , __lowercase ) quick_sort_random( __lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point quick_sort_random( __lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point def a__ ( ) -> Dict: _A = input("Enter numbers separated by a comma:\n" ).strip() _A = [int(__lowercase ) for item in user_input.split("," )] quick_sort_random(__lowercase , 0 , len(__lowercase ) ) print(__lowercase ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def a__ ( __lowercase ) -> List[Any]: _A = os.path.join(args.tf_model_dir , "parameters.json" ) _A = json.loads(open(__lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith(".pt" ): _A = args.output + ".pt" _A = OrderedDict() with tf.device("/CPU:0" ): _A = tf.train.load_checkpoint(args.tf_model_dir ) _A = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _A = reader.get_tensor(__lowercase ).astype(np.floataa ) if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ): continue if key_name.startswith("pasts/" ): if key_name.startswith("pasts/mlp" ): _A = int(key_name[9] ) elif key_name.startswith("pasts/out" ): _A = 8 _A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.startswith("model/moe" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/switch_gating/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/softmlp/kernel" ): _A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ): _A = key_name[-9:-7] for i in range(16 ): _A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) _A = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _A = torch.tensor(__lowercase ) elif key_name.startswith("model/mlp" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/p1/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/p1/bias" ): _A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/p2/kernel" ): _A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.endswith("/p2/bias" ): _A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.startswith("model/ln" ): _A = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _A = "model.blocks.%d.feed_forward.norm.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/g" ): _A = "model.blocks.%d.feed_forward.norm.weight" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.startswith("model/att" ): _A = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/qkv/kernel" ): _A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _A = state[:, 0, :, :] _A = state[:, 1, :, :] _A = state[:, 2, :, :] _A = ( 
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player _A = torch.tensor(__lowercase ) _A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player _A = torch.tensor(__lowercase ) _A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player _A = torch.tensor(__lowercase ) elif key_name.endswith("/o/kernel" ): _A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player _A = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name.startswith("model/an" ): _A = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _A = "model.blocks.%d.self_attn.norm.bias" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif key_name.endswith("/g" ): _A = "model.blocks.%d.self_attn.norm.weight" % player _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) elif ( key_name.startswith("model/wte" ) or key_name.startswith("model/wpe" ) or key_name.startswith("model/ete" ) ): _A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] _A = "model.%s.weight" % nlayer _A = vnp.copy() # same in embedded _A = torch.tensor(__lowercase ) if key_name.startswith("model/wte" ): _A = "lm_head.weight" _A = vnp.copy() # same in embedded _A = torch.tensor(__lowercase ) elif key_name.startswith("model/wob" ): _A = "final_logits_bias" _A = vnp.copy() # same in embedded _A = state.reshape((1, -1) ) _A = torch.tensor(__lowercase ) elif key_name == "model/dense/kernel": _A = "model.last_project.weight" _A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _A = torch.tensor(__lowercase ) elif key_name == "model/dense_1/bias": _A = "model.last_project.bias" _A = vnp.copy() # same because it is one dimensional _A = torch.tensor(__lowercase ) torch.save(__lowercase , args.output ) if __name__ == "__main__": a_ = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") a_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter a_ = "Create a default config file for Accelerate with only a few flags set." def a__ ( __lowercase="no" , __lowercase = default_json_config_file , __lowercase = False ) -> Any: _A = Path(__lowercase ) path.parent.mkdir(parents=__lowercase , exist_ok=__lowercase ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False _A = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" ) _A = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): _A = torch.cuda.device_count() _A = num_gpus _A = False if num_gpus > 1: _A = "MULTI_GPU" else: _A = "NO" elif is_xpu_available() and use_xpu: _A = torch.xpu.device_count() _A = num_xpus _A = False if num_xpus > 1: _A = "MULTI_XPU" else: _A = "NO" elif is_npu_available(): _A = torch.npu.device_count() _A = num_npus _A = False if num_npus > 1: _A = "MULTI_NPU" else: _A = "NO" else: _A = 0 _A = True _A = 1 _A = "NO" _A = ClusterConfig(**__lowercase ) config.to_json_file(__lowercase ) return path def a__ ( __lowercase , __lowercase ) -> Dict: _A = parser.add_parser("default" , parents=__lowercase , help=__lowercase , formatter_class=__lowercase ) parser.add_argument( "--config_file" , default=__lowercase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=__lowercase , help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=__lowercase ) return parser def a__ ( __lowercase ) -> Any: _A = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
"""simple docstring""" import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a_ = argparse.ArgumentParser( description=( "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") a_ = parser.parse_args() if args.model_type == "roberta": a_ = RobertaForMaskedLM.from_pretrained(args.model_name) a_ = "roberta" elif args.model_type == "gpt2": a_ = GPTaLMHeadModel.from_pretrained(args.model_name) a_ = "transformer" a_ = model.state_dict() a_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a_ = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a_ = f'''{prefix}.embeddings.{w}.weight''' a_ = state_dict[param_name] for w in ["weight", "bias"]: a_ = f'''{prefix}.embeddings.LayerNorm.{w}''' a_ = state_dict[param_name] # Transformer Blocks # a_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a_ = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: a_ = state_dict[f'''lm_head.dense.{w}'''] a_ = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a_ = state_dict[f'''{prefix}.ln_f.{w}'''] a_ = state_dict["lm_head.weight"] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class snake_case : def __init__( self : Dict , a__ : Any , a__ : Union[str, Any]=13 , a__ : str=7 , a__ : str=6 , a__ : int=17 , a__ : Union[str, Any]=23 , a__ : Union[str, Any]=11 , a__ : Union[str, Any]=True , ) -> List[Any]: '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = act_dim _A = state_dim _A = hidden_size _A = max_length _A = is_training def a_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' _A = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) _A = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) _A = floats_tensor((self.batch_size, self.seq_length, 1) ) _A = floats_tensor((self.batch_size, self.seq_length, 1) ) _A = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 ) _A = random_attention_mask((self.batch_size, self.seq_length) ) _A = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : str , a__ : Tuple , a__ : str , a__ : Dict , a__ : str , a__ : Optional[int] , ) -> Any: '''simple docstring''' _A = DecisionTransformerModel(config=a__ ) model.to(a__ ) model.eval() _A = model(a__ , a__ , a__ , a__ , a__ , a__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def a_ ( self : Tuple ) -> int: '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = (DecisionTransformerModel,) if is_torch_available() else () __UpperCamelCase = () __UpperCamelCase = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids __UpperCamelCase = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features __UpperCamelCase = False __UpperCamelCase = False 
__UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def a_ ( self : str ) -> Tuple: '''simple docstring''' _A = DecisionTransformerModelTester(self ) _A = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a_ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def a_ ( self : str ) -> Optional[int]: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) @slow def a_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DecisionTransformerModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def a_ ( self : Optional[int] ) -> Dict: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(a__ ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(a__ )] , a__ ) @require_torch class snake_case ( unittest.TestCase): @slow def a_ ( self : str ) -> str: '''simple docstring''' _A = 2 # number of steps of autoregressive prediction we will perform _A = 10 # defined by the RL environment, may be normalized _A = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) _A = model.to(a__ ) _A = model.config torch.manual_seed(0 ) _A = torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ) # env.reset() _A = torch.tensor( [[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=a__ ) _A = torch.tensor(a__ , device=a__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) _A = state _A = torch.zeros(1 , 0 , config.act_dim , device=a__ , dtype=torch.floataa ) _A = torch.zeros(1 , 0 , device=a__ , dtype=torch.floataa ) _A = torch.tensor(0 , device=a__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(a__ ): _A = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=a__ )] , dim=1 ) _A = torch.cat([rewards, torch.zeros(1 , 1 , device=a__ )] , dim=1 ) _A = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): _A , _A , _A = model( states=a__ , actions=a__ , rewards=a__ , returns_to_go=a__ , timesteps=a__ , attention_mask=a__ , return_dict=a__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) ) _A , _A , _A , _A = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ), 1.0, False, {}, ) _A = action_pred[0, -1] _A = torch.cat([states, state] , dim=1 ) _A = returns_to_go[0, -1] - reward _A = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) _A = torch.cat( [timesteps, torch.ones((1, 1) , device=a__ , dtype=torch.long ) * (step + 1)] , dim=1 )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir("fixtures/test_sentencepiece.model") a_ = get_tests_dir("fixtures/test_sentencepiece_bpe.model") a_ = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class snake_case ( _UpperCamelCase , unittest.TestCase): __UpperCamelCase = CamembertTokenizer __UpperCamelCase = CamembertTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def a_ ( self : List[str] ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _A = CamembertTokenizer(a__ ) tokenizer.save_pretrained(self.tmpdirname ) def a_ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _A = "<pad>" _A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def a_ ( self : Tuple ) -> List[str]: '''simple docstring''' _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(a__ ) , 10_04 ) def a_ ( self : List[str] ) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_05 ) def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _A = CamembertTokenizer(a__ ) tokenizer.save_pretrained(self.tmpdirname ) _A = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) _A = "I was born in 92000, and this is falsé." _A = tokenizer.encode(a__ ) _A = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) _A = tokenizer.encode(a__ , add_special_tokens=a__ ) _A = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) _A = tokenizer.convert_ids_to_tokens(a__ ) _A = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) def a_ ( self : Tuple ) -> List[str]: '''simple docstring''' if not self.test_rust_tokenizer: return _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = "I was born in 92000, and this is falsé." 
_A = tokenizer.tokenize(a__ ) _A = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) _A = tokenizer.encode(a__ , add_special_tokens=a__ ) _A = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) _A = self.get_rust_tokenizer() _A = tokenizer.encode(a__ ) _A = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) @slow def a_ ( self : Union[str, Any] ) -> int: '''simple docstring''' _A = {"input_ids": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. _A = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=a__ , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=a__ , )
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class snake_case ( _UpperCamelCase): def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int: '''simple docstring''' _A = p_stop _A = max_length def __iter__( self : Any ) -> Optional[Any]: '''simple docstring''' _A = 0 _A = False while not stop and count < self.max_length: yield count count += 1 _A = random.random() < self.p_stop class snake_case ( unittest.TestCase): def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]: '''simple docstring''' _A = [ BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ ) for i in range(2 ) ] _A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] ) self.assertListEqual(a__ , a__ ) def a_ ( self : List[Any] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(a__ , a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ ) def a_ ( self : int ) -> int: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> List[str]: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ ) _A = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) # Check the shards when the dataset is very small. _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ ) def a_ ( self : List[str] ) -> str: '''simple docstring''' _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ ) # Expected shouldn't change self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size. _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ ) _A = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) # Check the shards when the dataset is very small. 
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[[0, 1]], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ ) _A = [[], []] self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ ) def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' _A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str: '''simple docstring''' random.seed(a__ ) _A = list(a__ ) _A = [ IterableDatasetShard( a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , ) for i in range(a__ ) ] _A = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(a__ ) iterable_dataset_lists.append(list(a__ ) ) _A = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _A = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(a__ ) , len(a__ ) ) self.assertTrue(len(a__ ) % shard_batch_size == 0 ) _A = [] for idx in range(0 , len(a__ ) , a__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(a__ ) < len(a__ ): reference += reference self.assertListEqual(a__ , reference[: len(a__ )] ) def a_ ( self : List[str] ) -> List[Any]: '''simple docstring''' _A = 42 _A = RandomIterableDataset() self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) # Edge case with a very small dataset _A = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ ) def a_ ( self : List[str] ) -> Dict: '''simple docstring''' _A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ ) _A = SkipBatchSampler(a__ , 2 ) self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : int ) -> Optional[int]: '''simple docstring''' _A = DataLoader(list(range(16 ) ) , batch_size=4 ) _A = skip_first_batches(a__ , 
num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def a_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _A = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def a_ ( self : int ) -> int: '''simple docstring''' Accelerator() _A = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
"""simple docstring""" import collections import os import re from pathlib import Path a_ = "src/transformers" # Matches is_xxx_available() a_ = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} a_ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] a_ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available a_ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") a_ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] a_ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", a_ = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], a_ = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo a_ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: a_ = re.compile(r"^\s*try:") # Catches a line with else: a_ = re.compile(r"^\s*else:") def a__ ( __lowercase ) -> List[Any]: if _re_test_backend.search(__lowercase ) is None: return None _A = [b[0] for b in _re_backend.findall(__lowercase )] backends.sort() return "_and_".join(__lowercase ) def a__ ( __lowercase ) -> List[Any]: with open(__lowercase , "r" , encoding="utf-8" , newline="\n" ) as f: _A = f.readlines() _A = 0 while line_index < len(__lowercase ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__lowercase ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__lowercase ): _A = _re_one_line_import_struct.search(__lowercase ).groups()[0] _A = re.findall(R"\[([^\]]+)\]" , __lowercase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(__lowercase ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__lowercase ) > 0] objects.extend(__lowercase ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 _A = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(__lowercase ) is not None: objects.append(_re_import_struct_add_one.search(__lowercase ).groups()[0] ) elif _re_import_struct_add_many.search(__lowercase ) is not None: _A = _re_import_struct_add_many.search(__lowercase ).groups()[0].split(", " ) _A = [obj[1:-1] for obj in imports if len(__lowercase ) > 0] objects.extend(__lowercase ) elif _re_between_brackets.search(__lowercase ) is not None: _A = _re_between_brackets.search(__lowercase ).groups()[0].split(", " ) _A = [obj[1:-1] for obj in imports if len(__lowercase ) > 0] objects.extend(__lowercase ) elif _re_quote_object.search(__lowercase ) is not None: objects.append(_re_quote_object.search(__lowercase ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(__lowercase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): _A = lines[line_index] _A = _re_import.search(__lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {"none": objects} # Let's continue with backend-specific objects while line_index < len(__lowercase ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): _A = lines[line_index] _A = _re_import.search(__lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( __lowercase , __lowercase ) -> Dict: def find_duplicates(__lowercase ): return [k for k, v in collections.Counter(__lowercase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = "base imports" if key == "none" else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def a__ ( ) -> int: _A = [] for root, _, files in os.walk(__lowercase ): if "__init__.py" in files: _A = os.path.join(__lowercase , "__init__.py" ) _A = parse_init(__lowercase ) if objects is not None: _A = analyze_results(*__lowercase ) if len(__lowercase ) > 0: _A = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append("\n".join(__lowercase ) ) if len(__lowercase ) > 0: raise ValueError("\n\n".join(__lowercase ) ) def a__ ( ) -> Union[str, Any]: _A = [] for path, directories, files in os.walk(__lowercase ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(__lowercase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__lowercase ) / folder).glob("*.py" ) ) ) == 0: continue _A = str((Path(__lowercase ) / folder).relative_to(__lowercase ) ) _A = short_path.replace(os.path.sep , "." ) submodules.append(__lowercase ) for fname in files: if fname == "__init__.py": continue _A = str((Path(__lowercase ) / fname).relative_to(__lowercase ) ) _A = short_path.replace(".py" , "" ).replace(os.path.sep , "." ) if len(submodule.split("." ) ) == 1: submodules.append(__lowercase ) return submodules a_ = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def a__ ( ) -> Optional[Any]: # This is to make sure the transformers module imported is the one in the repo. 
from transformers.utils import direct_transformers_import _A = direct_transformers_import(__lowercase ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__lowercase , "__init__.py" ) , "r" ) as f: _A = f.read() import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , __lowercase ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__lowercase ) > 0: _A = "\n".join(f"""- {module}""" for module in module_not_registered ) raise ValueError( "The following submodules are not properly registed in the main init of Transformers:\n" f"""{list_of_modules}\n""" "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device a_ = False class snake_case ( unittest.TestCase): pass @nightly @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : Optional[int] ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Tuple ) -> Any: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a__ ) _A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = generator.manual_seed(0 ) _A = pipe.dual_guided( prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _A = "cyberpunk 2077" _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = "A painting of a squirrel eating a burger " _A = torch.manual_seed(0 ) _A = pipe.text_to_image( prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
"""simple docstring""" import math import sys def a__ ( __lowercase ) -> int: if number != int(__lowercase ): raise ValueError("the value of input must be a natural number" ) if number < 0: raise ValueError("the value of input must not be a negative number" ) if number == 0: return 1 _A = [-1] * (number + 1) _A = 0 for i in range(1 , number + 1 ): _A = sys.maxsize _A = int(math.sqrt(__lowercase ) ) for j in range(1 , root + 1 ): _A = 1 + answers[i - (j**2)] _A = min(__lowercase , __lowercase ) _A = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a_ = logging.get_logger(__name__) @dataclass class snake_case : __UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())}) __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'}) def a_ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _A = self.task_name.lower() class snake_case ( _UpperCamelCase): __UpperCamelCase = 'train' __UpperCamelCase = 'dev' __UpperCamelCase = 'test' class snake_case ( _UpperCamelCase): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple: '''simple docstring''' warnings.warn( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , ) _A = args _A = glue_processors[args.task_name]() _A = glue_output_modes[args.task_name] if isinstance(a__ , a__ ): try: _A = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) # Load data features from cache or dataset file _A = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) _A = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) _A , _A = label_list[2], label_list[1] _A = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_A = cached_features_file + ".lock" with FileLock(a__ ): if os.path.exists(a__ ) and not args.overwrite_cache: _A = time.time() _A = torch.load(a__ ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: _A = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: _A = self.processor.get_test_examples(args.data_dir ) else: _A = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: _A = examples[:limit_length] _A = glue_convert_examples_to_features( a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , ) _A = time.time() torch.save(self.features , a__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : List[Any] ) -> Any: '''simple docstring''' return len(self.features ) def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures: '''simple docstring''' return self.features[i] def a_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' return self.label_list
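# Illustrative usage sketch (a minimal example; the checkpoint name, task, and
# data path below are assumptions for demonstration, not part of the original file):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    args = GlueDataTrainingArguments(
        task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128
    )
    train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
    print(len(train_dataset), train_dataset.get_labels())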
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed a_ = logging.getLogger(__name__) def a__ ( __lowercase=2 , __lowercase=3 , __lowercase=16 , __lowercase = 10 , __lowercase = 2 ) -> Any: def get_dataset(__lowercase ): _A = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) _A = get_dataset(__lowercase ) _A = get_dataset(__lowercase ) _A = DataLoader(__lowercase , shuffle=__lowercase , batch_size=__lowercase , num_workers=4 ) _A = DataLoader(__lowercase , shuffle=__lowercase , batch_size=__lowercase , num_workers=4 ) return (train_dataloader, valid_dataloader) def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Union[str, Any]: _A = [] for epoch in range(__lowercase ): # Train quickly model.train() for batch in dataloader: _A , _A = batch _A = model(__lowercase ) _A = torch.nn.functional.mse_loss(__lowercase , __lowercase ) accelerator.backward(__lowercase ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class snake_case ( nn.Module): def __init__( self : int ) -> Optional[Any]: '''simple docstring''' super().__init__() _A = nn.Parameter(torch.randn(1 ) ) _A = nn.Parameter(torch.randn(1 ) ) def a_ ( self : Optional[int] , a__ : Dict ) -> Tuple: '''simple docstring''' return x * self.a + self.b class snake_case ( unittest.TestCase): def a_ ( self : Any ) -> str: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _A = DummyModel() _A = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _A , _A = dummy_dataloaders() _A = ProjectConfiguration(total_limit=1 , project_dir=a__ , automatic_checkpoint_naming=a__ ) # Train baseline _A = Accelerator(project_config=a__ ) _A , _A , _A , _A = accelerator.prepare( a__ , a__ , a__ , a__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def a_ ( self : Dict ) -> List[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _A = DummyModel() _A = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _A , _A = dummy_dataloaders() # Train baseline _A = Accelerator() _A , _A , _A , _A = accelerator.prepare( a__ , a__ , a__ , a__ ) # Save initial _A = os.path.join(a__ , "initial" ) accelerator.save_state(a__ ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() _A = train(3 , a__ , a__ , a__ , a__ ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() # Train partially set_seed(42 ) _A = DummyModel() _A = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _A , _A = dummy_dataloaders() _A = Accelerator() _A , _A , _A , _A = accelerator.prepare( a__ , a__ , a__ , a__ ) accelerator.load_state(a__ ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) _A = train(2 , a__ , a__ , a__ , a__ ) # Save everything _A = os.path.join(a__ , "checkpoint" ) 
accelerator.save_state(a__ ) # Load everything back in and make sure all states work accelerator.load_state(a__ ) test_rands += train(1 , a__ , a__ , a__ , a__ ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) def a_ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _A = DummyModel() _A = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _A , _A = dummy_dataloaders() _A = ProjectConfiguration(automatic_checkpoint_naming=a__ ) # Train baseline _A = Accelerator(project_dir=a__ , project_config=a__ ) _A , _A , _A , _A = accelerator.prepare( a__ , a__ , a__ , a__ ) # Save initial accelerator.save_state() ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() _A = train(3 , a__ , a__ , a__ , a__ ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() # Train partially set_seed(42 ) _A = DummyModel() _A = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _A , _A = dummy_dataloaders() _A = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=a__ ) _A = Accelerator(project_dir=a__ , project_config=a__ ) _A , _A , _A , _A = accelerator.prepare( a__ , a__ , a__ , a__ ) accelerator.load_state(os.path.join(a__ , "checkpoints" , "checkpoint_0" ) ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) _A = train(2 , a__ , a__ , a__ , a__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(a__ , "checkpoints" , "checkpoint_1" ) ) test_rands += train(1 , a__ , a__ , a__ , a__ ) ((_A) , (_A)) = model.a.item(), model.b.item() _A = optimizer.state_dict() self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) def a_ ( self : Dict ) -> List[str]: '''simple docstring''' _A = torch.tensor([1, 2, 3] ) _A = torch.tensor([2, 3, 4] ) _A = DummyModel() _A = torch.optim.Adam(net.parameters() ) _A = Accelerator() with self.assertRaises(a__ ) as ve: accelerator.register_for_checkpointing(a__ , a__ , a__ , a__ ) _A = str(ve.exception ) self.assertTrue("Item at index 0" in message ) self.assertTrue("Item at index 1" in message ) self.assertFalse("Item at index 2" in message ) self.assertFalse("Item at index 3" in message ) def a_ ( self : int ) -> Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _A = DummyModel() _A = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _A = torch.optim.lr_scheduler.StepLR(a__ , step_size=1 , gamma=0.9_9 ) _A , _A = dummy_dataloaders() _A = ProjectConfiguration(automatic_checkpoint_naming=a__ ) # Train baseline _A = Accelerator(project_dir=a__ , project_config=a__ ) _A , _A , _A , _A , _A = accelerator.prepare( a__ , a__ , a__ , a__ , a__ ) # Save initial accelerator.save_state() _A = scheduler.state_dict() train(3 , a__ , a__ , a__ , a__ , a__ ) self.assertNotEqual(a__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(a__ , "checkpoints" , "checkpoint_0" ) ) self.assertEqual(a__ , scheduler.state_dict() ) def a_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _A = DummyModel() _A = 
ProjectConfiguration(automatic_checkpoint_naming=a__ , total_limit=2 ) # Train baseline _A = Accelerator(project_dir=a__ , project_config=a__ ) _A = accelerator.prepare(a__ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(a__ , "checkpoints" , "checkpoint_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(a__ , "checkpoints" , "checkpoint_9" ) ) ) self.assertTrue(os.path.exists(os.path.join(a__ , "checkpoints" , "checkpoint_10" ) ) ) @require_cuda def a_ ( self : Tuple ) -> Dict: '''simple docstring''' _A = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(a__ , env=os.environ.copy() ) if __name__ == "__main__": a_ = "/tmp/accelerate/state_checkpointing" a_ = DummyModel() a_ = torch.optim.Adam(params=model.parameters(), lr=1E-3) a_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) a_ , a_ = dummy_dataloaders() a_ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline a_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) a_ , a_ , a_ , a_ , a_ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) a_ , a_ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: a_ = group["params"][0].device break assert param_device.type == accelerator.device.type a_ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: a_ = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: a_ = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
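# The round trip these tests exercise, in miniature (a sketch only; the
# checkpoint path is an illustrative assumption). `save_state`/`load_state`
# are documented Accelerator APIs covering model, optimizer, and RNG state:
#
#     accelerator = Accelerator()
#     model, optimizer, train_dl, valid_dl = accelerator.prepare(
#         model, optimizer, train_dl, valid_dl
#     )
#     accelerator.save_state("my_checkpoint")   # serialize model/optimizer/RNG state
#     train(1, model, train_dl, optimizer, accelerator)
#     accelerator.load_state("my_checkpoint")   # restore everything saved above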
"""simple docstring""" def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str: # Return True if there is node that has not iterated. _A = [False] * len(__lowercase ) _A = [] queue.append(__lowercase ) _A = True while queue: _A = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowercase ) _A = True _A = u return visited[t] def a__ ( __lowercase , __lowercase , __lowercase ) -> int: # This array is filled by BFS and to store path _A = [-1] * (len(__lowercase )) _A = 0 while bfs(__lowercase , __lowercase , __lowercase , __lowercase ): _A = float("Inf" ) _A = sink while s != source: # Find the minimum value in select path _A = min(__lowercase , graph[parent[s]][s] ) _A = parent[s] max_flow += path_flow _A = sink while v != source: _A = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _A = parent[v] return max_flow a_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] a_ , a_ = 0, 5 print(ford_fulkerson(graph, source, sink))