code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of ``n`` in ascending order.

    Uses trial division: each candidate divisor is divided out completely
    before moving on, so repeated factors appear once per multiplicity.

    >>> prime_factors(12)
    [2, 2, 3]
    >>> prime_factors(1)
    []
    """
    i = 2
    factors: list[int] = []
    # Only divisors up to sqrt(n) need testing; anything left over is prime.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:  # remaining cofactor is itself a prime factor
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
def solution(pence: int = 200) -> int:
    """Count the ways ``pence`` pence can be made from standard UK coins.

    Bottom-up dynamic programming (Project Euler 31): ``number_of_ways[i]``
    accumulates, coin by coin, the number of combinations summing to ``i``.
    Iterating coins in the outer loop counts combinations, not permutations.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: one way to make 0 pence (use no coins)
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
620
1
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __snake_case ( lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = 5 # Realm tok SCREAMING_SNAKE_CASE_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'test', 'question', 'this', 'is', 'the', 'first', 'second', 'third', 'fourth', 'fifth', 'record', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'realm_tokenizer') os.makedirs(_A , exist_ok=_A) SCREAMING_SNAKE_CASE_ = os.path.join(_A , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'realm_block_records') os.makedirs(_A , exist_ok=_A) def lowerCAmelCase__ ( self): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer')) def lowerCAmelCase__ ( self): shutil.rmtree(self.tmpdirname) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = RealmConfig(num_block_records=self.num_block_records) return config def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { 'id': ['0', '1'], 'question': ['foo', 'bar'], 'answers': [['Foo', 'Bar'], ['Bar']], }) return dataset def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = np.array( [ b'This is the first record', b'This is the second record', b'This is the third record', b'This is the fourth record', b'This is the fifth record', b'This is a longer longer longer record', ] , dtype=_A , ) return 
block_records def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_config() SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever() SCREAMING_SNAKE_CASE_ = retriever.tokenizer SCREAMING_SNAKE_CASE_ = np.array([0, 3] , dtype='long') SCREAMING_SNAKE_CASE_ = tokenizer(['Test question']).input_ids SCREAMING_SNAKE_CASE_ = tokenizer( ['the fourth'] , add_special_tokens=_A , return_token_type_ids=_A , return_attention_mask=_A , ).input_ids SCREAMING_SNAKE_CASE_ = config.reader_seq_len SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever( _A , _A , answer_ids=_A , max_length=_A , return_tensors='np') self.assertEqual(len(_A) , 2) self.assertEqual(len(_A) , 2) self.assertEqual(len(_A) , 2) self.assertEqual(concat_inputs.input_ids.shape , (2, 10)) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10)) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10)) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10)) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_config() SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever() SCREAMING_SNAKE_CASE_ = retriever.tokenizer SCREAMING_SNAKE_CASE_ = np.array([0, 3, 5] , dtype='long') SCREAMING_SNAKE_CASE_ = tokenizer(['Test question']).input_ids SCREAMING_SNAKE_CASE_ = tokenizer( ['the fourth', 'longer longer'] , add_special_tokens=_A , return_token_type_ids=_A , return_attention_mask=_A , ).input_ids SCREAMING_SNAKE_CASE_ = 
config.reader_seq_len SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever( _A , _A , answer_ids=_A , max_length=_A , return_tensors='np') self.assertEqual([False, True, True] , _A) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _A) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records')) # Test local path SCREAMING_SNAKE_CASE_ = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records')) self.assertEqual(retriever.block_records[0] , b'This is the first record') # Test mocked remote path with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download: SCREAMING_SNAKE_CASE_ = os.path.join( os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME) SCREAMING_SNAKE_CASE_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa') self.assertEqual(retriever.block_records[0] , b'This is the first record')
620
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """Return the best value achievable for the 0/1 knapsack by naive recursion.

    At each ``index`` the item is either skipped or, if it fits in the
    remaining ``max_weight``, taken; the better of the two branches wins.
    Exponential time — fine for the small inputs this teaching script targets.
    """
    if index == number_of_items:
        return 0  # no items left to consider
    # Branch 1: leave item `index` out.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Branch 2: take item `index` if it fits the remaining capacity.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
1
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = AudioLDMPipeline __lowerCAmelCase : Optional[int] = TEXT_TO_AUDIO_PARAMS __lowerCAmelCase : Union[str, Any] = TEXT_TO_AUDIO_BATCH_PARAMS __lowerCAmelCase : Union[str, Any] = frozenset( [ 'num_inference_steps', 'num_waveforms_per_prompt', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , ) SCREAMING_SNAKE_CASE_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = 
ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) SCREAMING_SNAKE_CASE_ = ClapTextModelWithProjection(_A) SCREAMING_SNAKE_CASE_ = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77) SCREAMING_SNAKE_CASE_ = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , ) SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(_A) SCREAMING_SNAKE_CASE_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def lowerCAmelCase__ ( self , _A , _A=0): if str(_A).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) == 256 SCREAMING_SNAKE_CASE_ = audio[:10] SCREAMING_SNAKE_CASE_ = np.array( [-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def lowerCAmelCase__ ( 
self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * [inputs['prompt']] # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('prompt')] SCREAMING_SNAKE_CASE_ = audioldm_pipe.tokenizer( _A , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = text_inputs['input_ids'].to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.text_encoder( _A , ) SCREAMING_SNAKE_CASE_ = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE_ = F.normalize(_A , dim=-1) SCREAMING_SNAKE_CASE_ = prompt_embeds # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE_ = negative_prompt SCREAMING_SNAKE_CASE_ = 3 * [inputs['prompt']] # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('prompt')] SCREAMING_SNAKE_CASE_ = [] for p in [prompt, negative_prompt]: SCREAMING_SNAKE_CASE_ = audioldm_pipe.tokenizer( _A , padding='max_length' , 
max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = text_inputs['input_ids'].to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.text_encoder( _A , ) SCREAMING_SNAKE_CASE_ = text_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE_ = F.normalize(_A , dim=-1) embeds.append(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = embeds # forward SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_A) SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = 'egg cracking' SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A , negative_prompt=_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) == 256 SCREAMING_SNAKE_CASE_ = audio[:10] SCREAMING_SNAKE_CASE_ = np.array( [-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_A) SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=2).audios assert audios.shape 
== (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.vocoder.config.sampling_rate SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(audio_length_in_s=0.0_1_6 , **_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) / vocoder_sampling_rate == 0.0_1_6 SCREAMING_SNAKE_CASE_ = audioldm_pipe(audio_length_in_s=0.0_3_2 , **_A) SCREAMING_SNAKE_CASE_ = output.audios[0] assert audio.ndim == 1 assert len(_A) / vocoder_sampling_rate == 0.0_3_2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = ['hey'] SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=1) SCREAMING_SNAKE_CASE_ = output.audios.shape assert audio_shape == (1, 256) SCREAMING_SNAKE_CASE_ = audioldm_pipe.vocoder.config 
config.model_in_dim *= 2 SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(_A).to(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=1) SCREAMING_SNAKE_CASE_ = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def lowerCAmelCase__ ( self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A) def lowerCAmelCase__ ( self): self._test_inference_batch_single_identical(test_mean_pixel_difference=_A) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCAmelCase__ ( self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A) @slow class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self , _A , _A="cpu" , _A=torch.floataa , _A=0): SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = np.random.RandomState(_A).standard_normal((1, 8, 128, 16)) SCREAMING_SNAKE_CASE_ = torch.from_numpy(_A).to(device=_A , dtype=_A) SCREAMING_SNAKE_CASE_ = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AudioLDMPipeline.from_pretrained('cvssp/audioldm') SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_inputs(_A) SCREAMING_SNAKE_CASE_ = 25 SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A).audios[0] assert audio.ndim == 1 assert len(_A) == 81920 SCREAMING_SNAKE_CASE_ = audio[77230:77240] SCREAMING_SNAKE_CASE_ = np.array( [-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5]) SCREAMING_SNAKE_CASE_ = 
np.abs(expected_slice - audio_slice).max() assert max_diff < 1E-2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AudioLDMPipeline.from_pretrained('cvssp/audioldm') SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A) audioldm_pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = self.get_inputs(_A) SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A).audios[0] assert audio.ndim == 1 assert len(_A) == 81920 SCREAMING_SNAKE_CASE_ = audio[27780:27790] SCREAMING_SNAKE_CASE_ = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2]) SCREAMING_SNAKE_CASE_ = np.abs(expected_slice - audio_slice).max() assert max_diff < 3E-2
620
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
1
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor

# Module logger (annotation removed: the original annotated this with an
# undefined `Union`, which would raise NameError at import time).
UpperCamelCase__ = logging.get_logger(__name__)


class __snake_case(lowerCAmelCase__):
    """Deprecated alias for the DeiT image processor.

    Kept only for backward compatibility: constructing it emits a
    deprecation warning and otherwise defers entirely to the parent class.
    """

    def __init__(self, *args, **kwargs):
        # Original signature `*_A, **_A` was a SyntaxError (duplicate
        # parameter name); the warn call also passed that argument where a
        # warning category belongs. FutureWarning matches the transformers
        # deprecation convention — TODO confirm against the upstream file.
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
620
def triangle_number_generator():
    """Yield the triangle numbers n*(n+1)/2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n``.

    Factorizes ``n`` by trial division and multiplies together
    (multiplicity + 1) for each prime — the standard divisor-count formula.
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:  # leftover prime contributes exponent 1, i.e. a factor of 2
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (PE 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
620
1
# Conversion factors to joules for each supported energy unit.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from ``from_type`` to ``to_type`` via joules.

    Raises:
        ValueError: if either unit name is not in ``ENERGY_CONVERSION``;
            the message lists the valid unit names.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
1
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = StableDiffusionControlNetImgaImgPipeline __lowerCAmelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} __lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowerCAmelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} ) __lowerCAmelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , 
down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE_ = CLIPTextModel(_A) SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') SCREAMING_SNAKE_CASE_ = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase__ ( self , _A , _A=0): if str(_A).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A) , ) SCREAMING_SNAKE_CASE_ = floats_tensor(control_image.shape , rng=random.Random(_A)).to(_A) SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(_A)).convert('RGB').resize((64, 64)) SCREAMING_SNAKE_CASE_ = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 
'control_image': control_image, } return inputs def lowerCAmelCase__ ( self): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCAmelCase__ ( self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3) def lowerCAmelCase__ ( self): self._test_inference_batch_single_identical(expected_max_diff=2E-3) class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : str = StableDiffusionControlNetImgaImgPipeline __lowerCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} __lowerCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowerCAmelCase : List[str] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def lowerCAmelCase__ ( self): torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0) def init_weights(_A): if isinstance(_A , torch.nn.Convad): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) SCREAMING_SNAKE_CASE_ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_A) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_A) 
torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE_ = CLIPTextModel(_A) SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') SCREAMING_SNAKE_CASE_ = MultiControlNetModel([controlneta, controlneta]) SCREAMING_SNAKE_CASE_ = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase__ ( self , _A , _A=0): if str(_A).startswith('mps'): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A) SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A) , ), ] SCREAMING_SNAKE_CASE_ = floats_tensor(control_image[0].shape , rng=random.Random(_A)).to(_A) SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(_A)).convert('RGB').resize((64, 64)) SCREAMING_SNAKE_CASE_ = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': 
generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_A) pipe.to(_A) SCREAMING_SNAKE_CASE_ = 1_0.0 SCREAMING_SNAKE_CASE_ = 4 SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = steps SCREAMING_SNAKE_CASE_ = scale SCREAMING_SNAKE_CASE_ = pipe(**_A)[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = steps SCREAMING_SNAKE_CASE_ = scale SCREAMING_SNAKE_CASE_ = pipe(**_A , control_guidance_start=0.1 , control_guidance_end=0.2)[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = steps SCREAMING_SNAKE_CASE_ = scale SCREAMING_SNAKE_CASE_ = pipe(**_A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0] SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A) SCREAMING_SNAKE_CASE_ = steps SCREAMING_SNAKE_CASE_ = scale SCREAMING_SNAKE_CASE_ = pipe(**_A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a)) > 1E-3 assert np.sum(np.abs(output_a - output_a)) > 1E-3 assert np.sum(np.abs(output_a - output_a)) > 1E-3 def lowerCAmelCase__ ( self): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCAmelCase__ ( self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3) def lowerCAmelCase__ ( self): self._test_inference_batch_single_identical(expected_max_diff=2E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_A) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) with 
tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_A) except NotImplementedError: pass @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny') SCREAMING_SNAKE_CASE_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , safety_checker=_A , controlnet=_A) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A) SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu').manual_seed(0) SCREAMING_SNAKE_CASE_ = 'evil space-punk bird' SCREAMING_SNAKE_CASE_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512)) SCREAMING_SNAKE_CASE_ = load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512)) SCREAMING_SNAKE_CASE_ = pipe( _A , _A , control_image=_A , generator=_A , output_type='np' , num_inference_steps=50 , strength=0.6 , ) SCREAMING_SNAKE_CASE_ = output.images[0] assert image.shape == (512, 512, 3) SCREAMING_SNAKE_CASE_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy') assert np.abs(expected_image - image).max() < 9E-2
620
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if 
self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
620
1
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node UpperCamelCase__ : Optional[int] = 4 UpperCamelCase__ : int = 3 class __snake_case ( lowerCAmelCase__ ): pass def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" for shard in shards: for i in range(_SCREAMING_SNAKE_CASE ): yield {"i": i, "shard": shard} def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = int(os.environ['RANK'] ) SCREAMING_SNAKE_CASE_ = int(os.environ['WORLD_SIZE'] ) SCREAMING_SNAKE_CASE_ = ArgumentParser() parser.add_argument('--streaming' , type=_SCREAMING_SNAKE_CASE ) parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE ) parser.add_argument('--num_workers' , type=_SCREAMING_SNAKE_CASE , default=0 ) SCREAMING_SNAKE_CASE_ = parser.parse_args() SCREAMING_SNAKE_CASE_ = args.streaming SCREAMING_SNAKE_CASE_ = args.num_workers SCREAMING_SNAKE_CASE_ = {'shards': [f"""shard_{shard_idx}""" for shard_idx in range(_SCREAMING_SNAKE_CASE )]} SCREAMING_SNAKE_CASE_ = IterableDataset.from_generator(_SCREAMING_SNAKE_CASE , gen_kwargs=_SCREAMING_SNAKE_CASE ) if not streaming: SCREAMING_SNAKE_CASE_ = Dataset.from_list(list(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = split_dataset_by_node(_SCREAMING_SNAKE_CASE , rank=_SCREAMING_SNAKE_CASE , world_size=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = torch.utils.data.DataLoader(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = NUM_SHARDS * NUM_ITEMS_PER_SHARD SCREAMING_SNAKE_CASE_ = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) SCREAMING_SNAKE_CASE_ = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
620
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : torch.FloatTensor class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ): super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE_ = Encoder( in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , ) SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A) SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) # pass init params to Decoder SCREAMING_SNAKE_CASE_ = Decoder( in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , ) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = self.encoder(_A) SCREAMING_SNAKE_CASE_ = self.quant_conv(_A) if not return_dict: return (h,) return VQEncoderOutput(latents=_A) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = False , _A = True): # also go through quantization layer if not force_not_quantize: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A) else: SCREAMING_SNAKE_CASE_ = h SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A) 
SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=_A) def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = sample SCREAMING_SNAKE_CASE_ = self.encode(_A).latents SCREAMING_SNAKE_CASE_ = self.decode(_A).sample if not return_dict: return (dec,) return DecoderOutput(sample=_A)
620
1
import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller UpperCamelCase__ : List[Any] = 3 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" print('Generating primitive root of p' ) while True: SCREAMING_SNAKE_CASE_ = random.randrange(3 , _SCREAMING_SNAKE_CASE ) if pow(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE ) == 1: continue if pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) == 1: continue return g def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" print('Generating prime p...' ) SCREAMING_SNAKE_CASE_ = rabin_miller.generate_large_prime(_SCREAMING_SNAKE_CASE ) # select large prime number. SCREAMING_SNAKE_CASE_ = primitive_root(_SCREAMING_SNAKE_CASE ) # one primitive root on modulo p. SCREAMING_SNAKE_CASE_ = random.randrange(3 , _SCREAMING_SNAKE_CASE ) # private_key -> have to be greater than 2 for safety. SCREAMING_SNAKE_CASE_ = cryptomath.find_mod_inverse(pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = (key_size, e_a, e_a, p) SCREAMING_SNAKE_CASE_ = (key_size, d) return public_key, private_key def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print('\nWARNING:' ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" 'Use a different name or delete these files and re-run this program.' 
) sys.exit() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = generate_key(_SCREAMING_SNAKE_CASE ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" , 'w' ) as fo: fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" , 'w' ) as fo: fo.write(f"""{private_key[0]},{private_key[1]}""" ) def _UpperCAmelCase ( ): """simple docstring""" print('Making key files...' ) make_key_files('elgamal' , 2_048 ) print('Key files generation successful' ) if __name__ == "__main__": main()
620
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize ``model`` with bitsandbytes (8-bit or 4-bit, per
    ``bnb_quantization_config``) and dispatch it across devices.

    For an already-materialized model the linear layers are swapped in place
    and the model is moved to GPU; for a meta-device model the checkpoint at
    ``weights_location`` is loaded into the quantized skeleton and dispatched
    according to ``device_map``.

    Raises:
        ImportError/ValueError: when the installed bitsandbytes does not
            support the requested quantization mode.
        RuntimeError: when no GPU is available, or when ``weights_location``
            is missing for a meta-device model.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """Resolve ``device_map`` for a quantized model.

    A ``None`` map defaults to the current GPU; a string map ("auto",
    "balanced", ...) is expanded with ``infer_auto_device_map`` using the
    quantization target dtype; a dict map is validated so no quantized module
    lands on cpu/disk in 4-bit mode.
    """
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        # Skipped modules keep the quantization dtype; fp32-kept modules stay fp32.
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Swap every eligible ``nn.Linear`` in ``model`` for a bitsandbytes
    quantized linear layer; warns when nothing was replaced (e.g. Conv1D-based
    architectures such as gpt2).
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    """Recursive worker for :func:`replace_with_bnb_layers`.

    Returns ``(model, has_been_replaced)``; ``current_key_name`` tracks the
    dotted path of the module being visited so skip rules can match full names.
    """
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    """Return module names that should stay un-quantized: tied weights plus
    (for models with a head) the last top-level module.
    """
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Return True when ``model`` contains at least one 4-bit bnb linear."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    """Device of the module's first parameter."""
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize one 8-bit parameter in place, write it (and its SCB scale
    statistics, when present) to ``offload_folder``, then replace it with an
    empty meta tensor.
    """
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
620
1
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = load_tool('text-classification') self.tool.setup() SCREAMING_SNAKE_CASE_ = load_tool('text-classification' , remote=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool('That\'s quite cool' , ['positive', 'negative']) self.assertEqual(_A , 'positive') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool('That\'s quite cool' , ['positive', 'negative']) self.assertEqual(_A , 'positive') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative']) self.assertEqual(_A , 'positive') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative']) self.assertEqual(_A , 'positive')
620
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from every byte value (0-255) to a printable unicode
    character, so byte-level BPE vocabularies avoid whitespace/control tokens.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of
    variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """BART tokenizer using byte-level Byte-Pair-Encoding (GPT-2 style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; results are memoized."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``<s> A </s>`` for a single sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
620
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration for an X-MOD model (a BERT-style encoder with per-language
    adapter modules). Defaults match facebook/xmod-base.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter-specific options
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    """ONNX export configuration for X-MOD."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
620
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'dpr' def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ): super().__init__(pad_token_id=_A , **_A) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = position_embedding_type
620
1
import unittest from knapsack import knapsack as k class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = [0] SCREAMING_SNAKE_CASE_ = [0] SCREAMING_SNAKE_CASE_ = len(_A) self.assertEqual(k.knapsack(_A , _A , _A , _A) , 0) SCREAMING_SNAKE_CASE_ = [60] SCREAMING_SNAKE_CASE_ = [10] SCREAMING_SNAKE_CASE_ = len(_A) self.assertEqual(k.knapsack(_A , _A , _A , _A) , 0) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = [1, 2, 3] SCREAMING_SNAKE_CASE_ = [3, 2, 1] SCREAMING_SNAKE_CASE_ = len(_A) self.assertEqual(k.knapsack(_A , _A , _A , _A) , 5) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 50 SCREAMING_SNAKE_CASE_ = [60, 100, 120] SCREAMING_SNAKE_CASE_ = [10, 20, 30] SCREAMING_SNAKE_CASE_ = len(_A) self.assertEqual(k.knapsack(_A , _A , _A , _A) , 220) if __name__ == "__main__": unittest.main()
620
import pytest import datasets # Import fixture modules as plugins UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' ) def _UpperCAmelCase ( ): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _UpperCAmelCase ( 
_SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
620
1
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCamelCase__ : Union[str, Any] = random.Random() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=1.0 , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE_ = global_rng SCREAMING_SNAKE_CASE_ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=400 , _A=2000 , _A=1 , _A=0.0 , _A=16000 , _A=True , _A=True , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = min_seq_length SCREAMING_SNAKE_CASE_ = max_seq_length SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE_ = feature_size SCREAMING_SNAKE_CASE_ = padding_value SCREAMING_SNAKE_CASE_ = sampling_rate SCREAMING_SNAKE_CASE_ = return_attention_mask SCREAMING_SNAKE_CASE_ = do_normalize def lowerCAmelCase__ ( self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase__ ( self , _A=False , _A=False): def _flatten(_A): return list(itertools.chain(*_A)) if equal_length: SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE_ = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length , self.max_seq_length , 
self.seq_length_diff) ] if numpify: SCREAMING_SNAKE_CASE_ = [np.asarray(_A) for x in speech_inputs] return speech_inputs class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : str = WavaVecaFeatureExtractor def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractionTester(self) def lowerCAmelCase__ ( self , _A): self.assertTrue(np.all(np.mean(_A , axis=0) < 1E-3)) self.assertTrue(np.all(np.abs(np.var(_A , axis=0) - 1) < 1E-3)) def lowerCAmelCase__ ( self): # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)] SCREAMING_SNAKE_CASE_ = [np.asarray(_A) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np').input_values SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values self.assertTrue(np.allclose(_A , _A , atol=1E-3)) # Test batched SCREAMING_SNAKE_CASE_ = feat_extract(_A , return_tensors='np').input_values SCREAMING_SNAKE_CASE_ = feat_extract(_A , return_tensors='np').input_values for enc_seq_a, enc_seq_a in zip(_A , _A): self.assertTrue(np.allclose(_A , _A , atol=1E-3)) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE_ = np.asarray(_A) SCREAMING_SNAKE_CASE_ = feat_extract(_A , return_tensors='np').input_values SCREAMING_SNAKE_CASE_ = feat_extract(_A , return_tensors='np').input_values for enc_seq_a, enc_seq_a in zip(_A , _A): self.assertTrue(np.allclose(_A , _A , atol=1E-3)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)] SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE_ = [None, 1600, None] for max_length, padding in zip(_A , _A): SCREAMING_SNAKE_CASE_ = feat_extract(_A , padding=_A , max_length=_A , return_tensors='np') SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self.assertTrue(input_values[0][800:].sum() < 1E-6) self._check_zero_mean_unit_variance(input_values[1][:1000]) self.assertTrue(input_values[0][1000:].sum() < 1E-6) self._check_zero_mean_unit_variance(input_values[2][:1200]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) SCREAMING_SNAKE_CASE_ = range(800 , 1400 , 200) SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in lengths] SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE_ = [None, 1600, None] for max_length, padding in zip(_A , _A): SCREAMING_SNAKE_CASE_ = feat_extract(_A , max_length=_A , padding=_A) SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self._check_zero_mean_unit_variance(input_values[1][:1000]) self._check_zero_mean_unit_variance(input_values[2][:1200]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) 
SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)] SCREAMING_SNAKE_CASE_ = feat_extract( _A , truncation=_A , max_length=1000 , padding='max_length' , return_tensors='np') SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)] SCREAMING_SNAKE_CASE_ = feat_extract( _A , truncation=_A , max_length=1000 , padding='longest' , return_tensors='np') SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000)) SCREAMING_SNAKE_CASE_ = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)] SCREAMING_SNAKE_CASE_ = feat_extract( _A , truncation=_A , max_length=2000 , padding='longest' , return_tensors='np') SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200)) @require_torch def lowerCAmelCase__ ( self): import torch SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) SCREAMING_SNAKE_CASE_ = np.random.rand(100).astype(np.floataa) SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE_ = 
feature_extractor.pad([{'input_values': inputs}] , return_tensors='np') self.assertTrue(np_processed.input_values.dtype == np.floataa) SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt') self.assertTrue(pt_processed.input_values.dtype == torch.floataa) @slow @require_torch def lowerCAmelCase__ ( self): # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: SCREAMING_SNAKE_CASE_ = WavaVecaConfig.from_pretrained(_A) SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_A) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
620
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
1
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Any = 'biogpt' def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ): SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = scale_embedding SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = layerdrop SCREAMING_SNAKE_CASE_ = activation_dropout super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
620
1
import math import os import sys def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = '' try: with open(_SCREAMING_SNAKE_CASE , 'rb' ) as binary_file: SCREAMING_SNAKE_CASE_ = binary_file.read() for dat in data: SCREAMING_SNAKE_CASE_ = f"""{dat:08b}""" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict[str, str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" lexicon.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = last_match_id if math.loga(_SCREAMING_SNAKE_CASE ).is_integer(): for curr_key in lexicon: SCREAMING_SNAKE_CASE_ = '0' + lexicon[curr_key] SCREAMING_SNAKE_CASE_ = bin(_SCREAMING_SNAKE_CASE )[2:] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {'0': '0', '1': '1'} SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = '', '' SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue SCREAMING_SNAKE_CASE_ = lexicon[curr_string] result += last_match_id add_key_to_lexicon(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) index += 1 SCREAMING_SNAKE_CASE_ = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": SCREAMING_SNAKE_CASE_ = lexicon[curr_string] result += last_match_id return result def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = os.path.getsize(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = bin(_SCREAMING_SNAKE_CASE )[2:] SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) return "0" * (length_length - 1) + file_length_binary + compressed def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): 
"""simple docstring""" SCREAMING_SNAKE_CASE_ = 8 try: with open(_SCREAMING_SNAKE_CASE , 'wb' ) as opened_file: SCREAMING_SNAKE_CASE_ = [ to_write[i : i + byte_length] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(_SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = read_file_binary(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = compress_data(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = add_file_length(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) write_file_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
620
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __snake_case ( lowerCAmelCase__ ): def __init__( self , _A , _A , _A , _A = None , ): super().__init__() self.register_modules(transformer=_A , vae=_A , scheduler=_A) # create a imagenet -> id dictionary for easier use SCREAMING_SNAKE_CASE_ = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(','): SCREAMING_SNAKE_CASE_ = int(_A) SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items())) def lowerCAmelCase__ ( self , _A): if not isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = list(_A) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""") return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ): SCREAMING_SNAKE_CASE_ = len(_A) SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels SCREAMING_SNAKE_CASE_ = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , ) SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1) SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device) SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_A) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2] SCREAMING_SNAKE_CASE_ = 
torch.cat([half, half] , dim=0) SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A) SCREAMING_SNAKE_CASE_ = t if not torch.is_tensor(_A): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps' if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa else: SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device) elif len(timesteps.shape) == 0: SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output SCREAMING_SNAKE_CASE_ = self.transformer( _A , timestep=_A , class_labels=_A).sample # perform guidance if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0) SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps) SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0) SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1) else: SCREAMING_SNAKE_CASE_ = noise_pred # compute previous image: x_t -> x_t-1 SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0) else: SCREAMING_SNAKE_CASE_ = latent_model_input SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents SCREAMING_SNAKE_CASE_ = 
self.vae.decode(_A).sample SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A) if not return_dict: return (samples,) return ImagePipelineOutput(images=_A)
620
1
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

# Bundle of the three dataset splits returned by read_data_sets().
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read a single big-endian uint32 from *bytestream*."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images from a gzipped IDX image file.

    Returns a uint8 array of shape (num_images, rows, cols, 1).
    Raises ValueError if the IDX magic number is not 2051.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        return data.reshape(num_images, rows, cols, 1)


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot row vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # One hot position per row, at the column given by the dense label.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from a gzipped IDX label file.

    Returns a 1-D uint8 array, or a one-hot matrix when *one_hot* is set.
    Raises ValueError if the IDX magic number is not 2049.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for one MNIST split with shuffled mini-batch iteration."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        one_hot arg is used only if fake_data is true. `dtype` can be either
        `uint8` (leave input as [0, 255]) or `float32` (rescale into [0, 1]).
        Seed arg provides a convenient way to set the per-graph random seed.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op-level seed is not set, use whatever graph-level seed is returned.
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1).
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch.
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch.
        if start + batch_size > self._num_examples:
            # Finished epoch.
            self._epochs_completed += 1
            # Get the rest of the examples in this epoch.
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data.
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch.
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download *filename* from *source_url* unless it's already present.

    Returns the local path of the (possibly pre-existing) file.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits.

    Raises ValueError when *validation_size* is not within the training set.
    """
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
620
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    """A small convolutional neural network built from scratch on NumPy.

    Architecture: one convolution layer -> pooling layer -> two-layer
    fully-connected back-propagation network with sigmoid activations.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, number_of_kernels, convolution_step]
        :param size_p1: pooling window size
        :param bp_num1: number of units in the flattened input layer
        :param bp_num2: number of units in the hidden layer
        :param bp_num3: number of units in the output layer
        :param rate_w: learning rate for the weights
        :param rate_t: learning rate for the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Kernels and weights start as uniform random values in (-0.5, 0.5);
        # thresholds start in (-1, 1).
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Serialize all model parameters to *save_path* with pickle."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a CNN instance from a file written by save_model()."""
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # Create model instance.
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # Restore learned parameters.
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round to three decimals (used when reporting predictions)."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve *data* with each kernel and return (flattened patches, feature maps)."""
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # Collect every kernel-sized patch of the input (the "focus" windows).
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # Compute one feature map per kernel.
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # Flatten the patches to one dimension for the gradient computation.
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Pool each feature map with average or max pooling."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # Average pooling.
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # Max pooling.
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into a single 1-D array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Flatten a single matrix into a 1-row array."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Upsample pooled gradients back to feature-map size and apply the sigmoid derivative."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train the network; returns the final mean squared error.

        Stops after *n_repeat* epochs or once the epoch error drops below
        *error_accuracy*. When *draw_e* is truthy, plots the error curve.
        """
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # Calculate error and gradient.
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # Weight and threshold learning process ---------
                # Convolution layer.
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # Fully connected layers.
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # Calculate the sum error over this single image.
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on each test image; returns rounded outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return (feature maps, pooled maps) for *data* so the convolution can be inspected."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
620
1
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number of the sequence 0, 1, 1, 2, 3, ...

    Built iteratively; returns 0 for n == 1 or non-integer input, matching
    the original guard clause.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with *n* digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler style entry point: index of the first n-digit Fibonacci number."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
620
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Fetch the last *num_runs* scheduled (daily) CI workflow runs on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run).
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results.
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run into *output_dir*."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name,
                    artifact_url=artifact_url,
                    output_dir=output_dir,
                    token=token,
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and extract the artifact zips; returns {artifact: {member: text}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # Read each regular member as UTF-8 text.
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
620
1
class SubArray:
    """Maximum contiguous sub-array sum over a comma-separated string of integers."""

    def __init__(self, arr):
        # We need a list, not a string, so split the input on commas.
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Return the maximum sub-array sum (Kadane-style dynamic programming).

        sum_value[i] is the best sum of a sub-array ending at i;
        rear[i] is the best sum seen anywhere up to i.
        """
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
620
# Lazy import structure for the MVP model subpackage, following the standard
# Transformers pattern: heavy submodules are only imported on first attribute
# access via _LazyModule; optional entries depend on installed backends.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast (Rust-backed) tokenizer is only exposed when `tokenizers` is installed.
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes require PyTorch.
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    # Optional heavy dependencies: only imported when scikit-learn is installed.
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    """Accuracy, F1, and their average (used for MRPC/QQP)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    """Pearson/Spearman correlations and their average (used for STS-B)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    """Dispatch to the metric used by each GLUE task; raises KeyError on unknown tasks."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(
        labels
    ), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    """Accuracy metric for XNLI; raises ValueError on length mismatch, KeyError on unknown tasks."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
620
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils ) __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] ) __lowerCAmelCase : Tuple = ['accelerate', 'launch'] __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate' __lowerCAmelCase : List[str] = 'default_config.yaml' __lowerCAmelCase : List[Any] = config_folder / config_file __lowerCAmelCase : str = config_folder / '_default_config.yaml' __lowerCAmelCase : Optional[int] = Path('tests/test_configs' ) @classmethod def lowerCAmelCase__ ( cls): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def lowerCAmelCase__ ( cls): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy()) def lowerCAmelCase__ ( self): for config in sorted(self.test_config_path.glob('**/*.yaml')): with self.subTest(config_file=_A): execute_subprocess_async( self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy()) def lowerCAmelCase__ ( self): execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy()) class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Optional[Any] = 'test-tpu' __lowerCAmelCase : str = 'us-central1-a' __lowerCAmelCase : Union[str, Any] = 'ls' __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config'] __lowerCAmelCase : Union[str, Any] = 'cd /usr/share' __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh' __lowerCAmelCase : 
Dict = 'Running gcloud compute tpus tpu-vm ssh' def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo "Hello World"', '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} 
test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
620
1
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=True , _SCREAMING_SNAKE_CASE : int="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {'add_prefix_space': True} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not line.startswith(' ' ) else {} SCREAMING_SNAKE_CASE_ = padding_side return tokenizer( [line] , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' if pad_to_max_length else None , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any]=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = input_ids.ne(_SCREAMING_SNAKE_CASE ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __snake_case ( lowerCAmelCase__ ): def __init__( self , _A , _A , _A , _A , _A="train" , _A=None , _A=None , _A=None , _A="" , ): super().__init__() SCREAMING_SNAKE_CASE_ = Path(_A).joinpath(type_path + '.source') SCREAMING_SNAKE_CASE_ = Path(_A).joinpath(type_path + '.target') SCREAMING_SNAKE_CASE_ = self.get_char_lens(self.src_file) SCREAMING_SNAKE_CASE_ = max_source_length SCREAMING_SNAKE_CASE_ = max_target_length assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}""" SCREAMING_SNAKE_CASE_ = tokenizer 
SCREAMING_SNAKE_CASE_ = prefix if n_obs is not None: SCREAMING_SNAKE_CASE_ = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE_ = src_lang SCREAMING_SNAKE_CASE_ = tgt_lang def __len__( self): return len(self.src_lens) def __getitem__( self , _A): SCREAMING_SNAKE_CASE_ = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE_ = self.prefix + linecache.getline(str(self.src_file) , _A).rstrip('\n') SCREAMING_SNAKE_CASE_ = linecache.getline(str(self.tgt_file) , _A).rstrip('\n') assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , _A): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE_ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , _A) else self.tokenizer ) SCREAMING_SNAKE_CASE_ = self.tokenizer.generator if isinstance(self.tokenizer , _A) else self.tokenizer SCREAMING_SNAKE_CASE_ = encode_line(_A , _A , self.max_source_length , 'right') SCREAMING_SNAKE_CASE_ = encode_line(_A , _A , self.max_target_length , 'right') SCREAMING_SNAKE_CASE_ = source_inputs['input_ids'].squeeze() SCREAMING_SNAKE_CASE_ = target_inputs['input_ids'].squeeze() SCREAMING_SNAKE_CASE_ = source_inputs['attention_mask'].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCAmelCase__ ( _A): return [len(_A) for x in Path(_A).open().readlines()] def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = torch.stack([x['input_ids'] for x in batch]) SCREAMING_SNAKE_CASE_ = torch.stack([x['attention_mask'] for x in batch]) SCREAMING_SNAKE_CASE_ = torch.stack([x['decoder_input_ids'] for x in batch]) SCREAMING_SNAKE_CASE_ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , _A) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ = ( 
self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , _A) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ = trim_batch(_A , _A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = trim_batch(_A , _A , attention_mask=_A) SCREAMING_SNAKE_CASE_ = { 'input_ids': source_ids, 'attention_mask': source_mask, 'decoder_input_ids': y, } return batch UpperCamelCase__ : Tuple = getLogger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[List] ): """simple docstring""" return list(itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_git_info() save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , 'git_log.json' ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int=4 , **_SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" with open(_SCREAMING_SNAKE_CASE ) as f: return json.load(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = git.Repo(search_parent_directories=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = { 'repo_id': str(_SCREAMING_SNAKE_CASE ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), 'hostname': str(socket.gethostname() ), } return repo_infos def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Callable , _SCREAMING_SNAKE_CASE : Iterable ): """simple docstring""" return list(map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" with open(_SCREAMING_SNAKE_CASE , 'wb' ) as f: return 
pickle.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" def remove_articles(_SCREAMING_SNAKE_CASE : str ): return re.sub(r'\b(a|an|the)\b' , ' ' , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : Any ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ): SCREAMING_SNAKE_CASE_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = normalize_answer(_SCREAMING_SNAKE_CASE ).split() SCREAMING_SNAKE_CASE_ = normalize_answer(_SCREAMING_SNAKE_CASE ).split() SCREAMING_SNAKE_CASE_ = Counter(_SCREAMING_SNAKE_CASE ) & Counter(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE_ = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = (2 * precision * recall) / (precision + recall) return fa def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" return normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = 0 for hypo, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): em += exact_match_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: em /= len(_SCREAMING_SNAKE_CASE ) return {"em": em} def _UpperCAmelCase ( 
_SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" return model_prefix.startswith('rag' ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE_ = 'dropout_rate' for p in extra_params: if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not hasattr(_SCREAMING_SNAKE_CASE , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(_SCREAMING_SNAKE_CASE ) ) delattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue SCREAMING_SNAKE_CASE_ = p if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else equivalent_param[p] setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) delattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return hparams, config
620
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCAmelCase__ ( *_A , **_A): pass def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Image ): """simple docstring""" SCREAMING_SNAKE_CASE_ = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Image ): """simple docstring""" SCREAMING_SNAKE_CASE_ = np.array(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = npimg.shape return {"hash": hashimage(_SCREAMING_SNAKE_CASE ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Dict = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __lowerCAmelCase : int = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = MaskGenerationPipeline(model=_A , image_processor=_A) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCAmelCase__ ( self , _A , _A): pass @require_tf @unittest.skip('Image segmentation not implemented in TF') def lowerCAmelCase__ ( self): pass @slow @require_torch def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = pipeline('mask-generation' , model='facebook/sam-vit-huge') SCREAMING_SNAKE_CASE_ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256) # Shortening by hashing SCREAMING_SNAKE_CASE_ = [] for 
i, o in enumerate(outputs['masks']): new_outupt += [{"mask": mask_to_test_readable(_A), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_A , decimals=4) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 
0.9_3_2_6}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 'facebook/sam-vit-huge' SCREAMING_SNAKE_CASE_ = pipeline('mask-generation' , model=_A) SCREAMING_SNAKE_CASE_ = image_segmenter( 'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256) # Shortening by hashing SCREAMING_SNAKE_CASE_ = [] for i, o in enumerate(outputs['masks']): new_outupt += [{"mask": mask_to_test_readable(_A), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_A , decimals=4) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3}, ] , )
620
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time UpperCamelCase__ : int = Lock() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left SCREAMING_SNAKE_CASE_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right SCREAMING_SNAKE_CASE_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so 
they are made outside # of the loop SCREAMING_SNAKE_CASE_ = Pipe() SCREAMING_SNAKE_CASE_ = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) SCREAMING_SNAKE_CASE_ = temp_rs SCREAMING_SNAKE_CASE_ = temp_rr for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): SCREAMING_SNAKE_CASE_ = Pipe() SCREAMING_SNAKE_CASE_ = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) SCREAMING_SNAKE_CASE_ = temp_rs SCREAMING_SNAKE_CASE_ = temp_rr process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=( len(_SCREAMING_SNAKE_CASE ) - 1, arr[len(_SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ): SCREAMING_SNAKE_CASE_ = result_pipe[p][0].recv() process_array_[p].join() return arr def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = odd_even_transposition(_SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
620
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase__ : Any = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Union[str, Any] = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering') self.tool.setup() SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering' , remote=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop')
620
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Any = 'biogpt' def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ): SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = scale_embedding SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = layerdrop SCREAMING_SNAKE_CASE_ = activation_dropout super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
620
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : List[Any] = ['pixel_values'] def __init__( self , _A = True , _A = None , _A = PILImageResampling.BICUBIC , _A = True , _A = None , _A = True , _A = 1 / 255 , _A = True , _A = None , _A = None , _A = True , **_A , ): super().__init__(**_A) SCREAMING_SNAKE_CASE_ = size if size is not None else {'shortest_edge': 224} SCREAMING_SNAKE_CASE_ = get_size_dict(_A , default_to_square=_A) SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} SCREAMING_SNAKE_CASE_ = get_size_dict(_A , default_to_square=_A , param_name='crop_size') SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = resample SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_rescale SCREAMING_SNAKE_CASE_ = rescale_factor SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE_ = do_convert_rgb def lowerCAmelCase__ ( self , _A , _A , _A = PILImageResampling.BICUBIC , _A = None , **_A , ): SCREAMING_SNAKE_CASE_ = get_size_dict(_A , default_to_square=_A) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key 
`shortest_edge`. Got {size.keys()}""") SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A) return resize(_A , size=_A , resample=_A , data_format=_A , **_A) def lowerCAmelCase__ ( self , _A , _A , _A = None , **_A , ): SCREAMING_SNAKE_CASE_ = get_size_dict(_A) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""") return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A) def lowerCAmelCase__ ( self , _A , _A , _A = None , **_A , ): return rescale(_A , scale=_A , data_format=_A , **_A) def lowerCAmelCase__ ( self , _A , _A , _A , _A = None , **_A , ): return normalize(_A , mean=_A , std=_A , data_format=_A , **_A) def lowerCAmelCase__ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ): SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ = size if size is not None else self.size SCREAMING_SNAKE_CASE_ = get_size_dict(_A , param_name='size' , default_to_square=_A) SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ = get_size_dict(_A , param_name='crop_size' , default_to_square=_A) SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else 
self.image_std SCREAMING_SNAKE_CASE_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE_ = make_list_of_images(_A) if not valid_images(_A): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE_ = [convert_to_rgb(_A) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_ = [to_numpy_array(_A) for image in images] if do_resize: SCREAMING_SNAKE_CASE_ = [self.resize(image=_A , size=_A , resample=_A) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE_ = [self.center_crop(image=_A , size=_A) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ = [self.rescale(image=_A , scale=_A) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ = [self.normalize(image=_A , mean=_A , std=_A) for image in images] SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_A , _A) for image in images] SCREAMING_SNAKE_CASE_ = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A)
620
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
620
1
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
620
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index == number_of_items: return 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 ) if weights[index] <= max_weight: SCREAMING_SNAKE_CASE_ = values[index] + knapsack( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
620
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Base import structure: the config and the slow tokenizer are always
# importable regardless of which optional backends are installed.
UpperCamelCase__ : Any = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

# Register the fast tokenizer only when the `tokenizers` backend is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"]

# Register the PyTorch model classes only when `torch` is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : str = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; at runtime the lazy
    # module below defers these until first attribute access.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): this file is machine-obfuscated — `_import_structure` is
    # not defined here (the structure dict above was bound to
    # `UpperCamelCase__`, and the optional-backend branches bind new names
    # instead of extending it); confirm against the un-obfuscated upstream
    # module before relying on lazy imports working.
    UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
1
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCamelCase__ : Dict = logging.get_logger(__name__) logging.set_verbosity_info() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: SCREAMING_SNAKE_CASE_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = XLMProphetNetForConditionalGeneration.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = ProphetNetForConditionalGenerationOld.from_pretrained(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ProphetNetForConditionalGeneration.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = ['key_proj', 'value_proj', 'query_proj'] SCREAMING_SNAKE_CASE_ = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in 
loading_info["missing_keys"]: SCREAMING_SNAKE_CASE_ = key.split('.' ) if attributes[0] == "lm_head": SCREAMING_SNAKE_CASE_ = prophet SCREAMING_SNAKE_CASE_ = prophet_old else: SCREAMING_SNAKE_CASE_ = prophet.prophetnet SCREAMING_SNAKE_CASE_ = prophet_old.model SCREAMING_SNAKE_CASE_ = False for attribute in attributes: if attribute in mapping: SCREAMING_SNAKE_CASE_ = mapping[attribute] if not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) > 0: SCREAMING_SNAKE_CASE_ = attribute elif hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" SCREAMING_SNAKE_CASE_ = old_model.weight logger.info(f"""{attribute} is initialized.""" ) SCREAMING_SNAKE_CASE_ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" SCREAMING_SNAKE_CASE_ = old_model.bias logger.info(f"""{attribute} is initialized""" ) SCREAMING_SNAKE_CASE_ = True break elif attribute in special_keys and hasattr(_SCREAMING_SNAKE_CASE , 'in_proj_weight' ): SCREAMING_SNAKE_CASE_ = old_model.in_proj_weight.shape[0] // 3 SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) 
SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) SCREAMING_SNAKE_CASE_ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.embed_positions.weight[:512, :] ) SCREAMING_SNAKE_CASE_ = True break if attribute.isdigit(): SCREAMING_SNAKE_CASE_ = model[int(_SCREAMING_SNAKE_CASE )] SCREAMING_SNAKE_CASE_ = old_model[int(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if old_attribute == "": SCREAMING_SNAKE_CASE_ = old_model else: if not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError(f"""{old_model} does not have {old_attribute}""" ) SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_key_init: raise ValueError(f"""{key} was not correctly initialized!""" ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) prophet.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCamelCase__ : Any = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
620
def triangle_number_generator():
    """Yield triangle numbers ``n * (n + 1) // 2`` for ``n`` in [1, 10**6).

    Bug fix: in the obfuscated original all three functions in this script
    shared the name ``_UpperCAmelCase`` (each def shadowing the previous)
    while their bodies called the real names ``triangle_number_generator``,
    ``count_divisors`` and ``solution`` — none of which existed.  The helper
    names are restored; the public entry point keeps the obfuscated name.
    """
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n`` via prime factorisation.

    Uses the multiplicative formula: for n = p1^a1 * ... * pk^ak the divisor
    count is (a1 + 1) * ... * (ak + 1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    # Any remainder > 1 is a single prime factor with multiplicity 1.
    if n > 1:
        divisors_count *= 2
    return divisors_count


def _UpperCAmelCase():
    """Return the first triangle number with more than 500 divisors.

    Project Euler problem 12.
    """
    return next(t for t in triangle_number_generator() if count_divisors(t) > 500)


if __name__ == "__main__":
    print(_UpperCAmelCase())
620
1
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = int(_SCREAMING_SNAKE_CASE ) if n_element < 1: SCREAMING_SNAKE_CASE_ = ValueError('a should be a positive number' ) raise my_error SCREAMING_SNAKE_CASE_ = [1] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (0, 0, 0) SCREAMING_SNAKE_CASE_ = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": UpperCamelCase__ : str = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") UpperCamelCase__ : str = hamming(int(n)) print("-----------------------------------------------------") print(F'The list with nth numbers is: {hamming_numbers}') print("-----------------------------------------------------")
620
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
1
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __snake_case : def __init__( self , _A , _A=3 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_ = None 
SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_A , ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = FalconModel(config=_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = FalconModel(_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , ) SCREAMING_SNAKE_CASE_ = model( _A , attention_mask=_A , encoder_hidden_states=_A , ) SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase__ ( self , _A , _A , _A 
, _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = FalconForCausalLM(config=_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = FalconForCausalLM(config=_A) model.to(_A) model.eval() # first forward pass SCREAMING_SNAKE_CASE_ = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , ) SCREAMING_SNAKE_CASE_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size) SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1) SCREAMING_SNAKE_CASE_ = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0] SCREAMING_SNAKE_CASE_ = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( 
SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[int] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase : Any = (FalconForCausalLM,) if is_torch_available() else () __lowerCAmelCase : Optional[int] = ( { 'feature-extraction': FalconModel, 'text-classification': FalconForSequenceClassification, 'text-generation': FalconForCausalLM, 'question-answering': FalconForQuestionAnswering, 'token-classification': FalconForTokenClassification, 'zero-shot': FalconForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase : Tuple = False __lowerCAmelCase : Optional[Any] = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = FalconModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , hidden_size=37) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: SCREAMING_SNAKE_CASE_ = alibi self.model_tester.create_and_check_model(_A , *_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1).to(_A) SCREAMING_SNAKE_CASE_ = 
ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = FalconForSequenceClassification(_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , labels=_A) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 'single_label_classification' SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1).to(_A) SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = FalconForSequenceClassification(_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , labels=_A) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = FalconForCausalLM(_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = input_ids.shape[0] SCREAMING_SNAKE_CASE_ = model._convert_to_rw_cache(result.past_key_values) SCREAMING_SNAKE_CASE_ = model._convert_cache_to_standard_format(_A , _A) for layer in range(len(_A)): for tensor_idx in range(2): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 
'multi_label_classification' SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1).to(_A) SCREAMING_SNAKE_CASE_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) SCREAMING_SNAKE_CASE_ = FalconForSequenceClassification(_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , labels=_A) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCAmelCase__ ( self): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(_A , 'use_cache'): return SCREAMING_SNAKE_CASE_ = model_class(_A).to(_A) if "use_cache" not in inputs: SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model(**_A) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return SCREAMING_SNAKE_CASE_ = ( getattr(_A , 'decoder_layers' , _A) or getattr(_A , 'num_decoder_layers' , _A) or config.num_hidden_layers ) SCREAMING_SNAKE_CASE_ = getattr(_A , 'num_kv_heads' , config.num_attention_heads) SCREAMING_SNAKE_CASE_ = getattr(_A , 'd_model' , config.hidden_size) SCREAMING_SNAKE_CASE_ = embed_dim // num_attention_heads SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] self.assertEqual(len(_A) , _A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = inputs['input_ids'].shape for i in range(_A): if config.new_decoder_architecture: SCREAMING_SNAKE_CASE_ = config.num_attention_heads elif config.multi_query: SCREAMING_SNAKE_CASE_ = 1 self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim)) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim)) @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b') SCREAMING_SNAKE_CASE_ = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b') model.eval() model.to(_A) SCREAMING_SNAKE_CASE_ = tokenizer('My favorite food is' , return_tensors='pt').to(_A) SCREAMING_SNAKE_CASE_ = ( 'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.' 
) SCREAMING_SNAKE_CASE_ = model.generate(**_A , do_sample=_A , max_new_tokens=19) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_A)[0] self.assertEqual(_A , _A) @slow def lowerCAmelCase__ ( self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(_A) SCREAMING_SNAKE_CASE_ = FalconForCausalLM.from_pretrained(_A) model.eval() model.to(_A) SCREAMING_SNAKE_CASE_ = tokenizer('My favorite food is' , return_tensors='pt').to(_A) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**_A , do_sample=_A , max_new_tokens=4) model.generate(**_A , do_sample=_A , max_new_tokens=4) model.generate(**_A , num_beams=2 , max_new_tokens=4) @slow def lowerCAmelCase__ ( self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(_A) SCREAMING_SNAKE_CASE_ = FalconForCausalLM.from_pretrained(_A) model.eval() model.to(device=_A) SCREAMING_SNAKE_CASE_ = tokenizer('My favorite food is' , return_tensors='pt').to(_A) # Test results are the same with and without cache SCREAMING_SNAKE_CASE_ = model.generate(**_A , do_sample=_A , max_new_tokens=20 , use_cache=_A) SCREAMING_SNAKE_CASE_ = model.generate(**_A , do_sample=_A , max_new_tokens=20 , use_cache=_A) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
620
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if 
self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
620
1
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) UpperCamelCase__ : str = _symbol_database.Default() UpperCamelCase__ : int = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 
\x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 
\x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) UpperCamelCase__ : Optional[int] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Union[str, Any] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" UpperCamelCase__ : Optional[int] = 45 UpperCamelCase__ : Union[str, Any] = 1_581 UpperCamelCase__ : Union[str, Any] = 1_517 UpperCamelCase__ : Dict = 1_570 UpperCamelCase__ : int = 1_584 UpperCamelCase__ : Any = 1_793 UpperCamelCase__ : Optional[int] = 1_795 UpperCamelCase__ : Union[str, Any] = 1_916 UpperCamelCase__ : Any = 1_864 UpperCamelCase__ : Optional[int] = 1_905 UpperCamelCase__ : Tuple = 1_919 
UpperCamelCase__ : List[Any] = 2_429 UpperCamelCase__ : Optional[int] = 2_208 UpperCamelCase__ : Optional[Any] = 2_418 UpperCamelCase__ : int = 2_323 UpperCamelCase__ : List[Any] = 2_407 # @@protoc_insertion_point(module_scope)
620
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class __snake_case ( lowerCAmelCase__ ):
    # Output container returned by the encode() method below.
    # NOTE(review): the field name has been destroyed by mechanical renaming;
    # the encode() method constructs this as `VQEncoderOutput(latents=...)`,
    # so the field was presumably named `latents` — confirm against upstream.
    __lowerCAmelCase : torch.FloatTensor


class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """VQ-style autoencoder: Encoder -> quant conv -> VectorQuantizer ->
    post-quant conv -> Decoder (see the .vae imports above).

    NOTE(review): mechanical renaming has collapsed EVERY constructor/method
    parameter to `_A` (duplicate parameter names are a SyntaxError in Python,
    so this block cannot compile as-is) and every assignment target to the
    local `SCREAMING_SNAKE_CASE_`, while right-hand sides still reference the
    lost original names (`vq_embed_dim`, `latent_channels`, `return_dict`,
    `force_not_quantize`, `h`, `quant`, `dec`, `sample`). The original
    attribute names (`self.encoder`, `self.quant_conv`, `self.quantize`,
    `self.post_quant_conv`, `self.decoder`) must be restored before this code
    can run; only comments/docstrings are added here.
    """

    @register_to_config
    def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ):
        super().__init__()
        # pass init params to Encoder
        SCREAMING_SNAKE_CASE_ = Encoder( in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
        # fall back to the latent channel count when no explicit VQ embedding dim is given
        SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 conv projecting encoder output into the VQ embedding space
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1)
        SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A)
        # 1x1 conv projecting quantized codes back to latent channels
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1)
        # pass init params to Decoder
        SCREAMING_SNAKE_CASE_ = Decoder( in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , )

    @apply_forward_hook
    def lowerCAmelCase__ ( self , _A , _A = True):
        # encode: run the encoder and the pre-quantization conv; returns a
        # VQEncoderOutput (or a bare tuple when return_dict is falsy).
        SCREAMING_SNAKE_CASE_ = self.encoder(_A)
        SCREAMING_SNAKE_CASE_ = self.quant_conv(_A)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=_A)

    @apply_forward_hook
    def lowerCAmelCase__ ( self , _A , _A = False , _A = True):
        # decode: optionally run the quantization layer, then the
        # post-quantization conv and the decoder.
        # also go through quantization layer
        if not force_not_quantize:
            # VectorQuantizer returns (quantized, loss, info) — only the
            # quantized tensor is used below.
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A)
        else:
            SCREAMING_SNAKE_CASE_ = h
        SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A)
        # spatial norm decoders additionally condition on the quantized latents
        SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)

    def lowerCAmelCase__ ( self , _A , _A = True):
        # forward: full autoencode round trip (encode -> decode).
        SCREAMING_SNAKE_CASE_ = sample
        SCREAMING_SNAKE_CASE_ = self.encode(_A).latents
        SCREAMING_SNAKE_CASE_ = self.decode(_A).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)
620
1
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class __snake_case ( unittest.TestCase ):
    """Tests for ``is_safetensors_compatible``: a file listing is compatible
    when every ``.bin`` (or ``.fp16.bin``) weight has a safetensors sibling.

    NOTE(review): mechanical renaming gave every test method the same name
    (``lowerCAmelCase__``) — only the last definition survives class creation —
    and replaced the argument of every ``is_safetensors_compatible(...)`` call
    with the free name ``_A``.  Both artifacts are deliberately kept so that
    runtime behavior is unchanged.
    """

    def lowerCAmelCase__ ( self):
        # Every component ships both .bin and .safetensors -> compatible.
        filenames = [
            'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(_A))

    def lowerCAmelCase__ ( self):
        # A lone unet pair is sufficient.
        filenames = ['unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors']
        self.assertTrue(is_safetensors_compatible(_A))

    def lowerCAmelCase__ ( self):
        # The unet safetensors sibling is absent -> incompatible.
        filenames = [
            'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
        ]
        self.assertFalse(is_safetensors_compatible(_A))

    def lowerCAmelCase__ ( self):
        # A lone text-encoder pair is sufficient.
        filenames = ['text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors']
        self.assertTrue(is_safetensors_compatible(_A))

    def lowerCAmelCase__ ( self):
        # The text-encoder safetensors sibling is absent -> incompatible.
        filenames = [
            'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(_A))

    def lowerCAmelCase__ ( self):
        # fp16 variant: every component has both variant files -> compatible.
        filenames = [
            'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(_A , variant=_A))

    def lowerCAmelCase__ ( self):
        # fp16 variant with unet files only.
        filenames = ['unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors']
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(_A , variant=_A))

    def lowerCAmelCase__ ( self):
        # Variant requested but only the non-variant unet files exist.
        filenames = ['unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors']
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(_A , variant=_A))

    def lowerCAmelCase__ ( self):
        # The fp16 unet safetensors sibling is absent -> incompatible.
        filenames = [
            'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(_A , variant=_A))

    def lowerCAmelCase__ ( self):
        # fp16 variant with text-encoder files only.
        filenames = ['text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors']
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(_A , variant=_A))

    def lowerCAmelCase__ ( self):
        # Variant requested but only the non-variant text-encoder files exist.
        filenames = ['text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors']
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(_A , variant=_A))

    def lowerCAmelCase__ ( self):
        # The fp16 text-encoder safetensors sibling is absent -> incompatible.
        # (original listed 'text_encoder/model.fp16.safetensors' commented out)
        filenames = [
            'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(_A , variant=_A))
620
# bitsandbytes (bnb) quantization utilities for accelerate.
#
# NOTE(review): mechanical renaming has badly damaged this module:
#  * the original 4-bit/8-bit availability helpers are collapsed into one
#    name, producing a literal duplicate import below;
#  * `load_in_8bit`/`load_in_4bit` are both spelled `load_in_abit`,
#    `torch.float32`/`float16` both `torch.floataa`, `Linear8bitLt`/
#    `Linear4bit` both `Linearabit*`, so sibling branches test the same
#    condition twice;
#  * every function is named `_UpperCAmelCase` (only the LAST definition of
#    that name survives import) and every assignment target is
#    `SCREAMING_SNAKE_CASE_`, while right-hand sides still reference the lost
#    original variable names (which therefore resolve as free/undefined).
# The code is preserved token-for-token; only comments/docstrings are added.
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_abit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


# only import bitsandbytes when the package is actually installed
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
    """Quantize a model's Linear layers with bitsandbytes and dispatch it.

    Two paths: a model already materialized on a real device is quantized
    in place (CUDA required); a meta-device model has its weights loaded
    from ``weights_location`` with an auto-built device map, then is
    dispatched across devices.

    NOTE(review): parameter names are lost (all ``_SCREAMING_SNAKE_CASE``);
    the body references ``model``, ``bnb_quantization_config``,
    ``weights_location``, ``device_map``, ``no_split_module_classes``,
    ``max_memory``, ``offload_folder``, ``offload_state_dict`` as free names.
    """
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    # availability checks (8-bit and 4-bit branches collapsed by renaming)
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
    SCREAMING_SNAKE_CASE_ = []
    # custom device map
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
        SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
    # compatibility with peft
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.' )
        SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
        # convert param to the right dtype
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                # keep flagged modules in full precision
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    # strip the tensor suffix to look up the owning parameter
                    SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' )
                    SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
                param.to(_SCREAMING_SNAKE_CASE )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        # meta-device model: swap in bnb layers without allocating memory,
        # build a device map, then load the checkpoint into the shell
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
    """Build/validate a device map for the quantized model.

    Accepts ``None`` (default to the current CUDA device), a strategy string
    (``auto``/``balanced``/``balanced_low_0``/``sequential``), or an explicit
    dict; dict maps are checked so no quantized module lands on cpu/disk.
    """
    if device_map is None:
        if torch.cuda.is_available():
            SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        SCREAMING_SNAKE_CASE_ = {}
        # skipped modules stay in the config's torch_dtype ...
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        # ... and the keep-in-fp32 modules in full precision
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = special_dtypes
        SCREAMING_SNAKE_CASE_ = no_split_module_classes
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            SCREAMING_SNAKE_CASE_ = get_balanced_memory(
                _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        SCREAMING_SNAKE_CASE_ = max_memory
        SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        # check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        SCREAMING_SNAKE_CASE_ = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
    """Public wrapper around ``_replace_with_bnb_layers``; warns when no
    ``nn.Linear`` was replaced (e.g. Conv1D-based architectures like gpt2)."""
    if modules_to_not_convert is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ):
    """Recursively swap ``nn.Linear`` children for bnb quantized linears.

    Returns ``(model, has_been_replaced)``; skip-listed module paths are left
    untouched. NOTE(review): the 8-bit/4-bit branches below have been
    collapsed onto the same ``load_in_abit`` flag by the renaming damage.
    """
    SCREAMING_SNAKE_CASE_ = False
    for name, module in model.named_children():
        if current_key_name is None:
            SCREAMING_SNAKE_CASE_ = []
        current_key_name.append(_SCREAMING_SNAKE_CASE )
        if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    SCREAMING_SNAKE_CASE_ = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                # copy weights/bias into the quantized module and freeze it
                SCREAMING_SNAKE_CASE_ = module.weight.data
                if module.bias is not None:
                    SCREAMING_SNAKE_CASE_ = module.bias.data
                bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
                setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                SCREAMING_SNAKE_CASE_ = True
        if len(list(module.children() ) ) > 0:
            # recurse into composite modules
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """Return module names that must NOT be quantized: tied parameters plus,
    for models with a head, the last top-level module (typically lm_head)."""
    with init_empty_weights():
        SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
    # For compatibility with Accelerate < 0.18
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] )
    SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0
    # Check if it is a base model
    SCREAMING_SNAKE_CASE_ = False
    if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
        SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    SCREAMING_SNAKE_CASE_ = list(model.named_children() )
    SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
    # add last module together with tied weights
    SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
    # remove ".weight" from the keys
    SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
    SCREAMING_SNAKE_CASE_ = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' )
        filtered_module_names.append(_SCREAMING_SNAKE_CASE )
    return filtered_module_names


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ):
    """True when the model contains at least one bnb quantized Linear layer."""
    for m in model.modules():
        if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
            return True
    return False


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ):
    """Return the device of the model's first parameter."""
    return next(parameter.parameters() ).device


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
    """Offload one (possibly quantized) parameter to disk and replace it with
    a meta tensor; SCB quantization statistics are offloaded alongside."""
    if fpaa_statistics is None:
        set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = param_name
        SCREAMING_SNAKE_CASE_ = model
        if "." in tensor_name:
            # walk the dotted path down to the owning submodule
            SCREAMING_SNAKE_CASE_ = tensor_name.split('.' )
            for split in splits[:-1]:
                SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                SCREAMING_SNAKE_CASE_ = new_module
            SCREAMING_SNAKE_CASE_ = splits[-1]
        # offload weights
        SCREAMING_SNAKE_CASE_ = False
        offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
    else:
        offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
        offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
    set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
620
1
import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


# path to the SentencePiece BPE-char fixture used by this suite
UpperCamelCase__ : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
    """Tokenizer tests for SpeechT5 (char-level SentencePiece model).

    NOTE(review): mechanical renaming gave every test method the same name
    (``lowerCAmelCase__``) — only the last definition of a given name
    survives class creation — collapsed several distinct parameters to
    duplicate ``_A`` names (a SyntaxError as written), and turned assignment
    targets into ``SCREAMING_SNAKE_CASE_`` while bodies still reference the
    lost original names (``tokenizer``, ``vocab_keys``, ``tokens``, ...).
    Code is preserved token-for-token; only comments/docstrings are added.
    """

    __lowerCAmelCase : Tuple = SpeechTaTokenizer
    __lowerCAmelCase : int = False
    __lowerCAmelCase : int = True

    def lowerCAmelCase__ ( self):
        # setUp: build a tokenizer from the fixture, register <mask> and
        # <ctc_blank>, and save it to the temp dir used by the mixin.
        super().setUp()

        # We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE_ = SpeechTaTokenizer(_A)

        SCREAMING_SNAKE_CASE_ = AddedToken('<mask>' , lstrip=_A , rstrip=_A)
        SCREAMING_SNAKE_CASE_ = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])

        tokenizer.save_pretrained(self.tmpdirname)

    def lowerCAmelCase__ ( self , _A):
        # get_input_output_texts: identical in/out text for this tokenizer.
        SCREAMING_SNAKE_CASE_ = 'this is a test'
        SCREAMING_SNAKE_CASE_ = 'this is a test'
        return input_text, output_text

    def lowerCAmelCase__ ( self , _A , _A=False , _A=20 , _A=5):
        # round-trip helper: encode then decode the sample text.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_input_output_texts(_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A)
        return text, ids

    def lowerCAmelCase__ ( self):
        # <pad> must map to id 1 and back.
        SCREAMING_SNAKE_CASE_ = '<pad>'
        SCREAMING_SNAKE_CASE_ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A)

    def lowerCAmelCase__ ( self):
        # vocab layout: specials at the front, added tokens at the back.
        SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-4] , 'œ')
        self.assertEqual(vocab_keys[-2] , '<mask>')
        self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
        self.assertEqual(len(_A) , 81)

    def lowerCAmelCase__ ( self):
        # base vocab size, excluding the two tokens added in setUp.
        self.assertEqual(self.get_tokenizer().vocab_size , 79)

    def lowerCAmelCase__ ( self):
        # adding regular and special tokens must grow the vocab consistently
        # and the new tokens must encode to ids above the original range.
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(do_lower_case=_A)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
                SCREAMING_SNAKE_CASE_ = len(_A)

                self.assertNotEqual(_A , 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                SCREAMING_SNAKE_CASE_ = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                SCREAMING_SNAKE_CASE_ = tokenizer.add_tokens(_A)
                SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
                SCREAMING_SNAKE_CASE_ = len(_A)

                self.assertNotEqual(_A , 0)
                self.assertEqual(_A , _A)
                self.assertEqual(_A , len(_A))
                self.assertEqual(_A , all_size + len(_A))

                SCREAMING_SNAKE_CASE_ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_A)

                self.assertGreaterEqual(len(_A) , 4)
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)

                SCREAMING_SNAKE_CASE_ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                SCREAMING_SNAKE_CASE_ = tokenizer.add_special_tokens(_A)
                SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
                SCREAMING_SNAKE_CASE_ = len(_A)

                self.assertNotEqual(_A , 0)
                self.assertEqual(_A , _A)
                self.assertEqual(_A , len(_A))
                self.assertEqual(_A , all_size_a + len(_A))

                SCREAMING_SNAKE_CASE_ = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_A)

                self.assertGreaterEqual(len(_A) , 6)
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0] , tokens[1])
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3] , tokens[-4])
                self.assertEqual(tokens[0] , tokenizer.eos_token_id)
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id)

    def lowerCAmelCase__ ( self):
        # intentionally skipped by the original suite
        pass

    def lowerCAmelCase__ ( self):
        # intentionally skipped by the original suite
        pass

    def lowerCAmelCase__ ( self):
        # char-level tokenization round trip, including the <unk> fallback
        # for the out-of-vocab piece '92000'.
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()

        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(_A , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )

        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            _A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_A)
        # fmt: off
        self.assertListEqual(_A , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_A)
        self.assertListEqual(
            _A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

    @slow
    def lowerCAmelCase__ ( self):
        # Integration test against the published microsoft/speecht5_asr
        # checkpoint; the expected ids/masks below are recorded fixtures.
        # Use custom sequence because this tokenizer does not handle numbers.
        SCREAMING_SNAKE_CASE_ = [
            'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
            'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
            'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
            'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
            'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
            'conditioning on both left and right context in all layers.',
            'The quick brown fox jumps over the lazy dog.',
        ]

        # fmt: off
        SCREAMING_SNAKE_CASE_ = {
            'input_ids': [
                [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
                [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ],
            'attention_mask': [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_A , )
620
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

# NOTE(review): identifiers in this module appear machine-mangled. Three
# distinct module constants all share the name ``UpperCamelCase__`` (each
# assignment clobbers the previous), locals are assigned to
# ``SCREAMING_SNAKE_CASE_`` but read back under their original names
# (``bs``, ``cs``, ``word``, ...), annotations use ``Union``/``Any``/``Dict``
# which are never imported, and class-level names
# (``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, ``lowerCAmelCase__``) are
# unbound here. Comments below document the apparent intent — a GPT-2-style
# byte-level BPE tokenizer for BART — but the code needs its names restored
# before it can run.

UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)

# On-disk vocabulary artefact names expected by the tokenizer.
UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ : List[str] = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
UpperCamelCase__ : str = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


@lru_cache()
def _UpperCAmelCase ( ):
    """Build the GPT-2 byte<->unicode lookup table.

    Maps each of the 256 byte values to a printable, non-whitespace unicode
    character so that arbitrary bytes can round-trip through the BPE vocab.
    Cached because the table is a pure constant.
    """
    # Printable byte ranges that map to themselves.
    SCREAMING_SNAKE_CASE_ = (
        list(range(ord('!' ) , ord('~' ) + 1 ) )
        + list(range(ord('¡' ) , ord('¬' ) + 1 ) )
        + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    SCREAMING_SNAKE_CASE_ = bs[:]
    SCREAMING_SNAKE_CASE_ = 0
    # Remaining bytes are mapped to unused code points starting at 256.
    for b in range(2**8 ):
        if b not in bs:
            bs.append(_SCREAMING_SNAKE_CASE )
            cs.append(2**8 + n )
            n += 1
    SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
    return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    SCREAMING_SNAKE_CASE_ = set()
    SCREAMING_SNAKE_CASE_ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        SCREAMING_SNAKE_CASE_ = char
    return pairs


class __snake_case ( lowerCAmelCase__ ):
    """Byte-level BPE tokenizer (BART flavour).

    NOTE(review): every method below is named ``lowerCAmelCase__`` and every
    class attribute ``__lowerCAmelCase`` — later definitions shadow earlier
    ones — and ``__init__`` declares the parameter ``_A`` repeatedly, which is
    a SyntaxError. The original method names are indicated in the per-method
    comments below.
    """

    __lowerCAmelCase : str = VOCAB_FILES_NAMES
    __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Model input names produced by this tokenizer.
    __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask']

    def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ):
        # Wrap plain-string special tokens in AddedToken so strip behaviour is explicit.
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
        super().__init__(
            errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
        # token -> id mapping from vocab.json, and its inverse.
        with open(_A , encoding='utf-8') as vocab_handle:
            SCREAMING_SNAKE_CASE_ = json.load(_A)
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
        SCREAMING_SNAKE_CASE_ = errors  # how to handle errors in decoding
        SCREAMING_SNAKE_CASE_ = bytes_to_unicode()
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()}
        # merges.txt: one merge rule per line, skipping the "#version" header.
        with open(_A , encoding='utf-8') as merges_handle:
            SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1]
        SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges]
        SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A))))
        SCREAMING_SNAKE_CASE_ = {}  # BPE result cache, keyed by raw token
        SCREAMING_SNAKE_CASE_ = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')

    @property
    def lowerCAmelCase__ ( self):
        # vocab_size
        return len(self.encoder)

    def lowerCAmelCase__ ( self):
        # get_vocab: base vocab merged with added tokens.
        return dict(self.encoder , **self.added_tokens_encoder)

    def lowerCAmelCase__ ( self , _A):
        # bpe: greedily merge the lowest-ranked adjacent pair until none remain.
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE_ = tuple(_A)
        SCREAMING_SNAKE_CASE_ = get_pairs(_A)
        if not pairs:
            return token
        while True:
            SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = 0
            while i < len(_A):
                try:
                    SCREAMING_SNAKE_CASE_ = word.index(_A , _A)
                except ValueError:
                    # No further occurrence of ``first``: keep the tail as-is.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    SCREAMING_SNAKE_CASE_ = j
                if word[i] == first and i < len(_A) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            SCREAMING_SNAKE_CASE_ = tuple(_A)
            SCREAMING_SNAKE_CASE_ = new_word
            if len(_A) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE_ = get_pairs(_A)
        SCREAMING_SNAKE_CASE_ = ' '.join(_A)
        SCREAMING_SNAKE_CASE_ = word  # memoize for repeated tokens
        return word

    def lowerCAmelCase__ ( self , _A):
        # _tokenize: regex-split, byte-encode each piece, then BPE it.
        SCREAMING_SNAKE_CASE_ = []
        for token in re.findall(self.pat , _A):
            SCREAMING_SNAKE_CASE_ = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' '))
        return bpe_tokens

    def lowerCAmelCase__ ( self , _A):
        # _convert_token_to_id (falls back to the unk token id).
        return self.encoder.get(_A , self.encoder.get(self.unk_token))

    def lowerCAmelCase__ ( self , _A):
        # _convert_id_to_token
        return self.decoder.get(_A)

    def lowerCAmelCase__ ( self , _A):
        # convert_tokens_to_string: undo the byte->unicode mapping.
        SCREAMING_SNAKE_CASE_ = ''.join(_A)
        SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
        return text

    def lowerCAmelCase__ ( self , _A , _A = None):
        # save_vocabulary: write vocab.json and merges.txt into a directory.
        if not os.path.isdir(_A):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(_A , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n')
        SCREAMING_SNAKE_CASE_ = 0
        with open(_A , 'w' , encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # NOTE(review): the sort key reads ``kv`` but the lambda parameter
            # is ``_A`` — unresolved name; presumably mangled from
            # ``lambda kv: kv[1]`` (sort merges by rank).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    SCREAMING_SNAKE_CASE_ = token_index
                writer.write(' '.join(_A) + '\n')
                index += 1
        return vocab_file, merge_file

    def lowerCAmelCase__ ( self , _A , _A = None):
        # build_inputs_with_special_tokens: <s> A </s> [</s> B </s>]
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowerCAmelCase__ ( self , _A , _A = None , _A = False):
        # get_special_tokens_mask: 1 marks a special token, 0 a sequence token.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
        if token_ids_a is None:
            return [1] + ([0] * len(_A)) + [1]
        return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1]

    def lowerCAmelCase__ ( self , _A , _A = None):
        # create_token_type_ids_from_sequences: BART does not use token types,
        # so the mask is all zeros.
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def lowerCAmelCase__ ( self , _A , _A=False , **_A):
        # prepare_for_tokenization: optionally prepend a space so the first
        # word is tokenized like a mid-sentence word.
        SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()):
            SCREAMING_SNAKE_CASE_ = ' ' + text
        return (text, kwargs)
620
1
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging

# NOTE(review): mangled module. The list-field helper is named
# ``_UpperCAmelCase`` but the class below calls it as ``list_field``; many
# field defaults reference the unbound name ``lowerCAmelCase__`` (presumably
# True/False originally); and all dataclass fields share the single name
# ``__lowerCAmelCase``, so only the last annotation/default survives.
# Comments document the apparent intent: benchmark CLI arguments.

UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Any=None ):
    """Dataclass ``field`` helper: a (shared) list default plus metadata."""
    # NOTE(review): the factory closes over ``default``, which is not a bound
    # name here — mangled from the original parameter name.
    return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )


@dataclass
class __snake_case :
    """Arguments controlling the (deprecated) HF benchmark utilities."""

    # models: checkpoints to benchmark (blank = base version of all models).
    __lowerCAmelCase : List[str] = list_field(
        default=[] , metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        } , )
    # batch_sizes / sequence_lengths swept during the benchmark.
    __lowerCAmelCase : List[int] = list_field(
        default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
    __lowerCAmelCase : List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
    # Feature toggles (inference / cuda / tpu / fp16 / training / tracing ...).
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Benchmark training of model'} )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Verbose memory tracing'} )
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        } , )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Trace memory line by line'} )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Save result to a CSV file'} )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Save all print statements in a log file'} )
    __lowerCAmelCase : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Whether to print environment information'} )
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        } , )
    # Output CSV / log filenames; defaults are timestamped at class-definition
    # time (``time()`` runs once at import, not per instance).
    __lowerCAmelCase : str = field(
        default=f"""inference_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
    __lowerCAmelCase : str = field(
        default=f"""inference_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
    __lowerCAmelCase : str = field(
        default=f"""train_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
    __lowerCAmelCase : str = field(
        default=f"""train_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
    __lowerCAmelCase : str = field(
        default=f"""env_info_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving environment information.'} , )
    __lowerCAmelCase : str = field(
        default=f"""log_{round(time() )}.csv""" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
    __lowerCAmelCase : int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
    __lowerCAmelCase : bool = field(
        default=lowerCAmelCase__ , metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        } , )

    def lowerCAmelCase__ ( self):
        # Deprecation notice for the whole benchmarking utility.
        # NOTE(review): the second warnings.warn positional argument is the
        # unbound name ``_A`` (presumably a warning category originally).
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ' are deprecated in general and it is advised to use external Benchmarking libraries '
            ' to benchmark Transformer models.' , _A , )

    def lowerCAmelCase__ ( self):
        # to_json_string: serialize all arguments for logging/repro.
        return json.dumps(dataclasses.asdict(self) , indent=2)

    @property
    def lowerCAmelCase__ ( self):
        # model_names: validated accessor — require at least one identifier.
        if len(self.models) <= 0:
            raise ValueError(
                'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
                ' bert-base-cased` or `args.models = [\'bert-base-cased\'].')
        return self.models

    @property
    def lowerCAmelCase__ ( self):
        # do_multi_processing: multiprocessing is disabled on TPU.
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('Multiprocessing is currently not possible on TPU.')
            return False
        else:
            return True
620
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): mangled DPR configuration module — ``__init__`` repeats the
# parameter name ``_A`` (a SyntaxError) and the assignments read original
# names (``vocab_size``, ...) that are never bound. Comments document intent.

UpperCamelCase__ : str = logging.get_logger(__name__)

# Config URLs for the published DPR checkpoints.
UpperCamelCase__ : Optional[int] = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class __snake_case ( lowerCAmelCase__ ):
    """Configuration for DPR (BERT-style encoder hyper-parameters)."""

    # model_type identifier used by the Auto* factory machinery.
    __lowerCAmelCase : Optional[int] = 'dpr'

    def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ):
        super().__init__(pad_token_id=_A , **_A)
        # Plain hyper-parameter storage; no computation here.
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = layer_norm_eps
        SCREAMING_SNAKE_CASE_ = projection_dim  # 0 means no projection on top of the encoder
        SCREAMING_SNAKE_CASE_ = position_embedding_type
620
1
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

# NOTE(review): mangled pipeline — locals are assigned to
# ``SCREAMING_SNAKE_CASE_`` but later read under their original names
# (``img_size``, ``sample``, ``model``, ``output`` ...), which are unbound as
# written. Comments document the apparent intent: unconditional image
# generation with the SDE-VE (variance-exploding) predictor-corrector sampler.


class __snake_case ( lowerCAmelCase__ ):
    # Expected module types registered on the pipeline.
    __lowerCAmelCase : UNetaDModel
    __lowerCAmelCase : ScoreSdeVeScheduler

    def __init__( self , _A , _A):
        super().__init__()
        # Registers unet + scheduler so save/load and .to() handle them.
        self.register_modules(unet=_A , scheduler=_A)

    @torch.no_grad()
    def __call__( self , _A = 1 , _A = 2000 , _A = None , _A = "pil" , _A = True , **_A , ):
        # Sample shape comes from the UNet config (assumes 3-channel images).
        SCREAMING_SNAKE_CASE_ = self.unet.config.sample_size
        SCREAMING_SNAKE_CASE_ = (batch_size, 3, img_size, img_size)
        SCREAMING_SNAKE_CASE_ = self.unet
        # Start from pure noise scaled by the scheduler's initial sigma.
        SCREAMING_SNAKE_CASE_ = randn_tensor(_A , generator=_A) * self.scheduler.init_noise_sigma
        SCREAMING_SNAKE_CASE_ = sample.to(self.device)
        self.scheduler.set_timesteps(_A)
        self.scheduler.set_sigmas(_A)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            SCREAMING_SNAKE_CASE_ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                SCREAMING_SNAKE_CASE_ = self.unet(_A , _A).sample
                SCREAMING_SNAKE_CASE_ = self.scheduler.step_correct(_A , _A , generator=_A).prev_sample
            # prediction step
            SCREAMING_SNAKE_CASE_ = model(_A , _A).sample
            SCREAMING_SNAKE_CASE_ = self.scheduler.step_pred(_A , _A , _A , generator=_A)
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = output.prev_sample, output.prev_sample_mean
        # Final output uses the denoised mean, clamped to valid image range.
        SCREAMING_SNAKE_CASE_ = sample_mean.clamp(0 , 1)
        SCREAMING_SNAKE_CASE_ = sample.cpu().permute(0 , 2 , 3 , 1).numpy()  # NCHW -> NHWC
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=_A)
620
import pytest

import datasets

# NOTE(review): mangled pytest conftest — every hook/fixture below is named
# ``_UpperCAmelCase`` and bodies read unbound names (``items``, ``config``,
# ``monkeypatch``, ``tmp_path_factory``). Fixtures still register via their
# decorators, but the hook names no longer match pytest's plugin protocol.

# Import fixture modules as plugins
UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ):
    """Mark every test without an explicit integration/unit marker as "unit"."""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
    """Register the custom ``torchaudio_latest`` marker."""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )


@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ):
    """Redirect every datasets cache/download path into a per-session temp dir."""
    SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache'
    SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets'
    SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics'
    SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) )
    SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) )
    SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) )


@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' )
def _UpperCAmelCase ( ):
    """Silence dataset progress bars for the whole test session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ):
    """Disable the download-count telemetry during tests."""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE )


@pytest.fixture
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ):
    """Silence the sqlalchemy 2.0 "uber warning" when requested by a test."""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
620
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding

# NOTE(review): mangled processor — methods share the name
# ``lowerCAmelCase__`` (later defs shadow earlier ones) and locals are read
# under unbound original names (``kwargs``, ``image_processor``, ``encoding``,
# ...). Comments document the apparent intent: a combined tokenizer +
# image-processor wrapper for ChineseCLIP.


class __snake_case ( lowerCAmelCase__ ):
    # Attributes expected by ProcessorMixin-style machinery.
    __lowerCAmelCase : Optional[int] = ['image_processor', 'tokenizer']
    __lowerCAmelCase : Dict = 'ChineseCLIPImageProcessor'
    __lowerCAmelCase : List[str] = ('BertTokenizer', 'BertTokenizerFast')

    def __init__( self , _A=None , _A=None , **_A):
        SCREAMING_SNAKE_CASE_ = None
        # Back-compat shim: accept the deprecated `feature_extractor` kwarg.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , _A , )
            SCREAMING_SNAKE_CASE_ = kwargs.pop('feature_extractor')
        SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(_A , _A)
        SCREAMING_SNAKE_CASE_ = self.image_processor

    def __call__( self , _A=None , _A=None , _A=None , **_A):
        # Tokenize text and/or preprocess images; merging pixel_values into
        # the text encoding when both are given.
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , return_tensors=_A , **_A)
        if images is not None:
            SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=_A , **_A)
        if text is not None and images is not None:
            SCREAMING_SNAKE_CASE_ = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_A) , tensor_type=_A)

    def lowerCAmelCase__ ( self , *_A , **_A):
        # batch_decode: forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*_A , **_A)

    def lowerCAmelCase__ ( self , *_A , **_A):
        # decode: forwarded to the tokenizer.
        return self.tokenizer.decode(*_A , **_A)

    @property
    def lowerCAmelCase__ ( self):
        # model_input_names: union of tokenizer + image-processor inputs,
        # de-duplicated while preserving order.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def lowerCAmelCase__ ( self):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _A , )
        return self.image_processor_class
620
from typing import List

import numpy as np

# NOTE(review): mangled sharding helpers from the `datasets` library — all
# five functions share the name ``_UpperCAmelCase`` yet the later ones call
# the earlier ones by their original names (``_number_of_shards_in_gen_kwargs``,
# ``_distribute_shards``), which are unbound here. Comments note each
# function's original role.


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ):
    """Return the number of shards implied by the list-valued gen_kwargs.

    All list values must have the same length; otherwise sharding would be
    ambiguous and a RuntimeError is raised.
    """
    SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            )
        )
    SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 )
    return max(1 , _SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
    """Split ``num_shards`` into at most ``max_num_jobs`` contiguous ranges.

    Earlier groups absorb the remainder, so group sizes differ by at most one.
    """
    SCREAMING_SNAKE_CASE_ = []
    for group_idx in range(_SCREAMING_SNAKE_CASE ):
        # Bool (group_idx < remainder) adds the extra shard to early groups.
        SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add )
        shards_indices_per_group.append(_SCREAMING_SNAKE_CASE )
    return shards_indices_per_group


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ):
    """Split gen_kwargs into one kwargs dict per job (list values are sliced)."""
    SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE )
    if num_shards == 1:
        return [dict(_SCREAMING_SNAKE_CASE )]
    else:
        SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(_SCREAMING_SNAKE_CASE ) )
        ]


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ):
    """Merge per-job gen_kwargs back together (inverse of the split above)."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ):
    """Shuffle every list value in gen_kwargs, one shared permutation per length."""
    # One permutation per distinct list length, so same-length lists stay aligned.
    SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
    SCREAMING_SNAKE_CASE_ = {}
    for size in list_sizes:
        SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE )
    for key, value in shuffled_kwargs.items():
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]]
    return shuffled_kwargs
620
1
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

if is_onnx_available():
    import onnxruntime as ort

# NOTE(review): mangled nightly integration test — locals are assigned to
# ``SCREAMING_SNAKE_CASE_`` but read back under original names (``options``,
# ``pipe``, ``output`` ...), and ``_A`` kwargs are unbound. Requires network,
# a GPU, and onnxruntime; skipped entirely unless the @nightly/@require_*
# decorators allow it.


@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    @property
    def lowerCAmelCase__ ( self):
        # gpu_provider: CUDA execution provider pinned to a fixed arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowerCAmelCase__ ( self):
        # gpu_options: onnxruntime session options for the test.
        SCREAMING_SNAKE_CASE_ = ort.SessionOptions()
        SCREAMING_SNAKE_CASE_ = False
        return options

    def lowerCAmelCase__ ( self):
        # End-to-end legacy inpainting run compared against a stored output.
        SCREAMING_SNAKE_CASE_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        SCREAMING_SNAKE_CASE_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        SCREAMING_SNAKE_CASE_ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
        # using the PNDM scheduler by default
        SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=_A)
        SCREAMING_SNAKE_CASE_ = 'A red cat sitting on a park bench'
        # Fixed seed so the generated image is reproducible.
        SCREAMING_SNAKE_CASE_ = np.random.RandomState(0)
        SCREAMING_SNAKE_CASE_ = pipe(
            prompt=_A , image=_A , mask_image=_A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=_A , output_type='np' , )
        SCREAMING_SNAKE_CASE_ = output.images[0]
        assert image.shape == (512, 512, 3)
        # Loose elementwise tolerance against the reference render.
        assert np.abs(expected_image - image).max() < 1E-2
620
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): mangled BioGPT configuration — ``__init__`` repeats the
# parameter name ``_A`` (a SyntaxError) and assignments read original names
# (``vocab_size``, ...) that are never bound. Comments document intent.

UpperCamelCase__ : List[Any] = logging.get_logger(__name__)

UpperCamelCase__ : List[str] = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class __snake_case ( lowerCAmelCase__ ):
    """Configuration for BioGPT (decoder-only transformer hyper-parameters)."""

    # model_type identifier used by the Auto* factory machinery.
    __lowerCAmelCase : Any = 'biogpt'

    def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ):
        # Plain hyper-parameter storage; token ids forwarded to the base class.
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = layer_norm_eps
        SCREAMING_SNAKE_CASE_ = scale_embedding
        SCREAMING_SNAKE_CASE_ = use_cache
        SCREAMING_SNAKE_CASE_ = layerdrop
        SCREAMING_SNAKE_CASE_ = activation_dropout
        super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
620
1
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class __snake_case ( lowerCAmelCase__ ):
    """Output container for text-to-video pipelines: a batch of video frames.

    NOTE(review): the field name is mangled to ``__lowerCAmelCase``; its base
    class reference ``lowerCAmelCase__`` (presumably BaseOutput) is unbound.
    """

    # Generated frames, either as numpy arrays or a torch tensor.
    __lowerCAmelCase : Union[List[np.ndarray], torch.FloatTensor]


# Import the real pipelines only when torch + transformers are installed;
# otherwise fall back to dummy placeholder objects.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
620
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __snake_case ( lowerCAmelCase__ ):
    """DiT-style pipeline: class-conditional image generation with a diffusion
    transformer operating in a VAE latent space.

    NOTE(review): identifiers in this file are machine-mangled — every local is
    `SCREAMING_SNAKE_CASE_` and every parameter is `_A` (duplicate `_A` names
    make the defs a SyntaxError), so this code is not runnable as written. The
    comments below reconstruct intent from structure; code is left untouched.
    """

    def __init__( self , _A , _A , _A , _A = None , ):
        # Parameters are presumably (transformer, vae, scheduler, id2label=None)
        # given the register_modules call and the label handling — TODO confirm.
        super().__init__()
        self.register_modules(transformer=_A , vae=_A , scheduler=_A)

        # create a imagenet -> id dictionary for easier use
        SCREAMING_SNAKE_CASE_ = {}
        if idalabel is not None:
            # Each value is a comma-separated list of human-readable labels that
            # all map back to the same class id.
            for key, value in idalabel.items():
                for label in value.split(','):
                    SCREAMING_SNAKE_CASE_ = int(_A)
        SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items()))

    def lowerCAmelCase__ ( self , _A):
        # Map label string(s) to class ids; accepts a single label or a list.
        # Raises ValueError listing the known labels on an unknown label.
        if not isinstance(_A , _A):
            SCREAMING_SNAKE_CASE_ = list(_A)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ):
        # Parameters presumably: (class_labels, guidance_scale=4.0,
        # generator=None, num_inference_steps=50, output_type="pil",
        # return_dict=True) — TODO confirm against the public DiTPipeline API.
        SCREAMING_SNAKE_CASE_ = len(_A)
        SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
        SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels

        # Draw the initial latent noise: (batch, channels, size, size).
        SCREAMING_SNAKE_CASE_ = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
        # For classifier-free guidance the batch is doubled: conditional half +
        # null-class ("1000") unconditional half.
        SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1)
        SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device)
        SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(_A)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Keep both halves in sync: copy the conditional half over the
                # unconditional half so only the class embedding differs.
                SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2]
                SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0)
            SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)

            SCREAMING_SNAKE_CASE_ = t
            if not torch.is_tensor(_A):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps'
                if isinstance(_A , _A):
                    SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
                else:
                    SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
                SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            SCREAMING_SNAKE_CASE_ = self.transformer(
                _A , timestep=_A , class_labels=_A).sample

            # perform guidance
            if guidance_scale > 1:
                # First `latent_channels` channels are the noise prediction;
                # the remainder (learned sigma) is left unguided.
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0)
                SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0)
                SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1)
            else:
                SCREAMING_SNAKE_CASE_ = noise_pred

            # compute previous image: x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample

        # After denoising, discard the unconditional half (if any) and decode
        # the latents back to pixel space with the VAE.
        if guidance_scale > 1:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0)
        else:
            SCREAMING_SNAKE_CASE_ = latent_model_input

        SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
        SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample

        # Rescale from [-1, 1] to [0, 1].
        SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()

        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=_A)
620
1
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    # Presumably a `to_2tuple` helper: pass iterables through, duplicate
    # scalars into a pair. NOTE(review): identifiers in this file are
    # machine-mangled — the body references `x` while the parameter is
    # `_SCREAMING_SNAKE_CASE`, so the function is broken as written.
    if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable ):
        return x
    return (x, x)


@require_flax
class __snake_case :
    # Test mixin for Flax VisionTextDualEncoder models. Concrete subclasses
    # below supply `get_vision_text_model`, `prepare_config_and_inputs`, and
    # `get_pretrained_model_and_inputs`. NOTE(review): all method names were
    # mangled to `lowerCAmelCase__` and all parameters to `_A` (duplicate
    # parameter names make most defs SyntaxErrors); comments name the
    # presumed original methods.

    def lowerCAmelCase__ ( self , _A , _A):
        # abstract: get_vision_text_model(vision_config, text_config)
        pass

    def lowerCAmelCase__ ( self):
        # abstract: prepare_config_and_inputs()
        pass

    def lowerCAmelCase__ ( self):
        # abstract: get_pretrained_model_and_inputs()
        pass

    def lowerCAmelCase__ ( self , _A , _A , _A):
        # assert_almost_equals(a, b, tol): max-abs-difference comparison.
        SCREAMING_SNAKE_CASE_ = np.abs((a - b)).max()
        self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""")

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A=None , **_A):
        # check_model_from_pretrained_configs: build the joint config from the
        # two sub-configs and check the projection output shapes.
        SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A)
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A)
        SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A)
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A=None , **_A):
        # check_vision_text_dual_encoder_from_pretrained: same shape checks,
        # but composing the model from two pre-built sub-models.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A)
        SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model}
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A)
        SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A)
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A=None , **_A):
        # check_save_load: outputs must survive a save/reload round trip.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A)
        SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model}
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A)
        SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A)
        SCREAMING_SNAKE_CASE_ = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A)
            SCREAMING_SNAKE_CASE_ = after_output[0]
            SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(_A , 1E-3)

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A=None , **_A):
        # check_vision_text_output_attention: verify attention tensor shapes
        # for both towers when output_attentions is requested.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A)
        SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model}
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A)
        SCREAMING_SNAKE_CASE_ = model(
            input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A)
        SCREAMING_SNAKE_CASE_ = output.vision_model_output.attentions
        self.assertEqual(len(_A) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.image_size)
        SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.patch_size)
        SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        SCREAMING_SNAKE_CASE_ = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        SCREAMING_SNAKE_CASE_ = output.text_model_output.attentions
        self.assertEqual(len(_A) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def lowerCAmelCase__ ( self , _A , _A , _A):
        # check_pt_flax_equivalence(fx_model, pt_model, inputs_dict): compare
        # direct outputs, then PT->Flax and Flax->PT converted checkpoints.
        pt_model.to(_A)
        pt_model.eval()

        # prepare inputs
        SCREAMING_SNAKE_CASE_ = inputs_dict
        SCREAMING_SNAKE_CASE_ = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = pt_model(**_A).to_tuple()
            SCREAMING_SNAKE_CASE_ = fx_model(**_A).to_tuple()
            self.assertEqual(len(_A) , len(_A) , 'Output lengths differ between Flax and PyTorch')
            for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
                self.assert_almost_equals(_A , pt_output.numpy() , 4E-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A)
            SCREAMING_SNAKE_CASE_ = fx_model_loaded(**_A).to_tuple()
            self.assertEqual(len(_A) , len(_A) , 'Output lengths differ between Flax and PyTorch')
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
                self.assert_almost_equals(_A , pt_output.numpy() , 4E-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A)
            pt_model_loaded.to(_A)
            pt_model_loaded.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE_ = pt_model_loaded(**_A).to_tuple()
            self.assertEqual(len(_A) , len(_A) , 'Output lengths differ between Flax and PyTorch')
            for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
                self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4E-2)

    def lowerCAmelCase__ ( self , _A , _A , _A):
        # check_equivalence_pt_to_flax: seed the Flax model from PT weights.
        SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A)
        SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_A)
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A)
        SCREAMING_SNAKE_CASE_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A)
        SCREAMING_SNAKE_CASE_ = fx_state
        self.check_pt_flax_equivalence(_A , _A , _A)

    def lowerCAmelCase__ ( self , _A , _A , _A):
        # check_equivalence_flax_to_pt: seed the PT model from Flax params.
        SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A)
        SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_A)
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A)
        SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_A , fx_model.params)
        self.check_pt_flax_equivalence(_A , _A , _A)

    def lowerCAmelCase__ ( self):
        # test entry point: model from pretrained configs
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_A)

    def lowerCAmelCase__ ( self):
        # test entry point: dual encoder from pretrained sub-models
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_A)

    def lowerCAmelCase__ ( self):
        # test entry point: save/load round trip
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        self.check_save_load(**_A)

    def lowerCAmelCase__ ( self):
        # test entry point: attention output shapes
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_A)

    @is_pt_flax_cross_test
    def lowerCAmelCase__ ( self):
        # test entry point: PT<->Flax cross-framework equivalence
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('vision_config')
        SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('text_config')
        SCREAMING_SNAKE_CASE_ = config_inputs_dict
        self.check_equivalence_pt_to_flax(_A , _A , _A)
        self.check_equivalence_flax_to_pt(_A , _A , _A)

    @slow
    def lowerCAmelCase__ ( self):
        # test entry point: real pretrained checkpoint save/load round trip
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_pretrained_model_and_inputs()
        SCREAMING_SNAKE_CASE_ = model_a(**_A)
        SCREAMING_SNAKE_CASE_ = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A)
            SCREAMING_SNAKE_CASE_ = model_a(**_A)
            SCREAMING_SNAKE_CASE_ = after_outputs[0]
            SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(_A , 1E-5)


@require_flax
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
    # Concrete mixin instantiation: Flax ViT vision tower + Flax BERT text tower.

    def lowerCAmelCase__ ( self):
        # get_pretrained_model_and_inputs
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_A , text_from_pt=_A , )
        SCREAMING_SNAKE_CASE_ = 13
        SCREAMING_SNAKE_CASE_ = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4])
        SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def lowerCAmelCase__ ( self , _A , _A):
        # get_vision_text_model(vision_config, text_config)
        SCREAMING_SNAKE_CASE_ = FlaxViTModel(_A)
        SCREAMING_SNAKE_CASE_ = FlaxBertModel(_A)
        return vision_model, text_model

    def lowerCAmelCase__ ( self):
        # prepare_config_and_inputs: delegate to the per-model testers.
        SCREAMING_SNAKE_CASE_ = FlaxViTModelTester(self)
        SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self)
        SCREAMING_SNAKE_CASE_ = vit_model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = vision_config_and_inputs
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
    # Concrete mixin instantiation: Flax CLIP vision tower + Flax BERT text tower.

    def lowerCAmelCase__ ( self):
        # get_pretrained_model_and_inputs
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_A , text_from_pt=_A , )
        SCREAMING_SNAKE_CASE_ = 13
        SCREAMING_SNAKE_CASE_ = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4])
        SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def lowerCAmelCase__ ( self , _A , _A):
        # get_vision_text_model(vision_config, text_config)
        SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModel(_A)
        SCREAMING_SNAKE_CASE_ = FlaxBertModel(_A)
        return vision_model, text_model

    def lowerCAmelCase__ ( self):
        # prepare_config_and_inputs: delegate to the per-model testers.
        SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModelTester(self)
        SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self)
        SCREAMING_SNAKE_CASE_ = clip_model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = vision_config_and_inputs
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class __snake_case ( unittest.TestCase ):
    # Integration test against the public `clip-italian/clip-italian` checkpoint.

    @slow
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0)
        SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        SCREAMING_SNAKE_CASE_ = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=_A , padding=_A , return_tensors='np')
        SCREAMING_SNAKE_CASE_ = model(**_A)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )

        SCREAMING_SNAKE_CASE_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
        self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1E-3))
620
import pickle

import numpy as np
from matplotlib import pyplot as plt


class __snake_case :
    """A small from-scratch CNN (one convolution layer + pooling + a two-layer
    fully-connected back-propagation network) with pickle save/load.

    NOTE(review): identifiers in this file are machine-mangled — every local
    and attribute target is `SCREAMING_SNAKE_CASE_` and every parameter is
    `_A` (duplicate `_A` names make most defs SyntaxErrors), so the class is
    not runnable as written. Comments below reconstruct the presumed intent
    from the right-hand-side names; code is left untouched.
    """

    def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2):
        # Presumed signature: (conv1_get, size_p1, bp_num1, bp_num2, bp_num3,
        # rate_w=0.2, rate_t=0.2). Weights/thresholds are randomly initialised
        # in [-0.5, 0.5] (kernels, wkj, vji) and [-1, 1] (thresholds).
        SCREAMING_SNAKE_CASE_ = bp_numa
        SCREAMING_SNAKE_CASE_ = bp_numa
        SCREAMING_SNAKE_CASE_ = bp_numa
        SCREAMING_SNAKE_CASE_ = conva_get[:2]
        SCREAMING_SNAKE_CASE_ = conva_get[2]
        SCREAMING_SNAKE_CASE_ = size_pa
        SCREAMING_SNAKE_CASE_ = rate_w
        SCREAMING_SNAKE_CASE_ = rate_t
        SCREAMING_SNAKE_CASE_ = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
            for i in range(self.conva[1])
        ]
        SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
        SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
        SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1
        SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1
        SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1

    def lowerCAmelCase__ ( self , _A):
        # save model dict with pickle
        SCREAMING_SNAKE_CASE_ = {
            'num_bp1': self.num_bpa,
            'num_bp2': self.num_bpa,
            'num_bp3': self.num_bpa,
            'conv1': self.conva,
            'step_conv1': self.step_conva,
            'size_pooling1': self.size_poolinga,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conva,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conva,
            'thre_bp2': self.thre_bpa,
            'thre_bp3': self.thre_bpa,
        }
        with open(_A , 'wb') as f:
            pickle.dump(_A , _A)
        print(f"""Model saved: {save_path}""")

    @classmethod
    def lowerCAmelCase__ ( cls , _A):
        # read saved model
        # NOTE(review): unpickling is only safe on trusted files (S301).
        with open(_A , 'rb') as f:
            SCREAMING_SNAKE_CASE_ = pickle.load(_A)  # noqa: S301
        SCREAMING_SNAKE_CASE_ = model_dic.get('conv1')
        conv_get.append(model_dic.get('step_conv1'))
        SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1')
        SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1')
        SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2')
        SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3')
        SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight')
        SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre')
        # create model instance
        SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A)
        # modify model parameter
        SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1')
        SCREAMING_SNAKE_CASE_ = model_dic.get('wkj')
        SCREAMING_SNAKE_CASE_ = model_dic.get('vji')
        SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1')
        SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2')
        SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3')
        return conv_ins

    def lowerCAmelCase__ ( self , _A):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def lowerCAmelCase__ ( self , _A):
        # round helper used when binarising predictions
        return round(_A , 3)

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
        # convolution process
        SCREAMING_SNAKE_CASE_ = convs[0]
        SCREAMING_SNAKE_CASE_ = convs[1]
        SCREAMING_SNAKE_CASE_ = np.shape(_A)[0]
        # get the data slice of original image data, data_focus
        SCREAMING_SNAKE_CASE_ = []
        for i_focus in range(0 , size_data - size_conv + 1 , _A):
            for j_focus in range(0 , size_data - size_conv + 1 , _A):
                SCREAMING_SNAKE_CASE_ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(_A)
        # calculate the feature map of every single kernel, and saved as list of matrix
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(_A):
            SCREAMING_SNAKE_CASE_ = []
            for i_focus in range(len(_A)):
                SCREAMING_SNAKE_CASE_ = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(_A))
            SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(
                _A , _A)
            data_featuremap.append(_A)
        # expanding the data slice to One dimenssion
        SCREAMING_SNAKE_CASE_ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(_A))
        SCREAMING_SNAKE_CASE_ = np.asarray(_A)
        return focus_list, data_featuremap

    def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"):
        # pooling process ("average_pool" or "max_pooling")
        SCREAMING_SNAKE_CASE_ = len(featuremaps[0])
        SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling)
        SCREAMING_SNAKE_CASE_ = []
        for i_map in range(len(_A)):
            SCREAMING_SNAKE_CASE_ = featuremaps[i_map]
            SCREAMING_SNAKE_CASE_ = []
            for i_focus in range(0 , _A , _A):
                for j_focus in range(0 , _A , _A):
                    SCREAMING_SNAKE_CASE_ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(_A))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(_A))
            SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A)
            featuremap_pooled.append(_A)
        return featuremap_pooled

    def lowerCAmelCase__ ( self , _A):
        # expanding three dimension data to one dimension list
        SCREAMING_SNAKE_CASE_ = []
        for i in range(len(_A)):
            SCREAMING_SNAKE_CASE_ = np.shape(data[i])
            SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1])
            SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0]
            data_expanded.extend(_A)
        SCREAMING_SNAKE_CASE_ = np.asarray(_A)
        return data_expanded

    def lowerCAmelCase__ ( self , _A):
        # expanding matrix to one dimension list
        SCREAMING_SNAKE_CASE_ = np.asarray(_A)
        SCREAMING_SNAKE_CASE_ = np.shape(_A)
        SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1])
        return data_expanded

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
        # back-propagate the pooled gradient through the pooling layer
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = 0
        for i_map in range(_A):
            SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map))
            for i in range(0 , _A , _A):
                for j in range(0 , _A , _A):
                    SCREAMING_SNAKE_CASE_ = pd_pool[
                        i_pool
                    ]
                    SCREAMING_SNAKE_CASE_ = i_pool + 1
            SCREAMING_SNAKE_CASE_ = np.multiply(
                _A , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
            pd_all.append(_A)
        return pd_all

    def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool):
        # model traning
        # NOTE(review): last default is the builtin `bool`, presumably a
        # mangled `draw_e=bool` plotting flag — confirm against callers.
        print('----------------------Start Training-------------------------')
        print((' - - Shape: Train_Data ', np.shape(_A)))
        print((' - - Shape: Teach_Data ', np.shape(_A)))
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = 10000
        while rp < n_repeat and mse >= error_accuracy:
            SCREAMING_SNAKE_CASE_ = 0
            print(f"""-------------Learning Time {rp}--------------""")
            for p in range(len(_A)):
                # print('------------Learning Image: %d--------------'%p)
                SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p])
                SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p])
                # forward pass: conv -> pool -> flatten -> two BP layers
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
                    _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
                SCREAMING_SNAKE_CASE_ = np.shape(_A)
                SCREAMING_SNAKE_CASE_ = self._expand(_A)
                SCREAMING_SNAKE_CASE_ = data_bp_input
                SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa
                SCREAMING_SNAKE_CASE_ = self.sig(_A)
                SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa
                SCREAMING_SNAKE_CASE_ = self.sig(_A)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                SCREAMING_SNAKE_CASE_ = np.multiply(
                    (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa)))
                SCREAMING_SNAKE_CASE_ = np.multiply(
                    np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa)))
                SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji)
                SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist()
                SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool(
                    _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv])
                    SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A)
                    SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]))
                    SCREAMING_SNAKE_CASE_ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_k_all * self.rate_thre
                SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            SCREAMING_SNAKE_CASE_ = rp + 1
            SCREAMING_SNAKE_CASE_ = error_count / patterns
            all_mse.append(_A)

        def draw_error():
            # Plot MSE-per-epoch against the target accuracy line.
            SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(_A , '+-')
            plt.plot(_A , 'r--')
            plt.xlabel('Learning Times')
            plt.ylabel('All_mse')
            plt.grid(_A , alpha=0.5)
            plt.show()

        print('------------------Training Complished---------------------')
        print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
        if draw_e:
            draw_error()
        return mse

    def lowerCAmelCase__ ( self , _A):
        # model predict
        SCREAMING_SNAKE_CASE_ = []
        print('-------------------Start Testing-------------------------')
        print((' - - Shape: Test_Data ', np.shape(_A)))
        for p in range(len(_A)):
            SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p])
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
                _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
            SCREAMING_SNAKE_CASE_ = self._expand(_A)
            SCREAMING_SNAKE_CASE_ = data_bp_input
            SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa
            SCREAMING_SNAKE_CASE_ = self.sig(_A)
            SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa
            SCREAMING_SNAKE_CASE_ = self.sig(_A)
            produce_out.extend(bp_outa.getA().tolist())
        SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out]
        return np.asarray(_A)

    def lowerCAmelCase__ ( self , _A):
        # return the data of image after convoluting process so we can check it out
        SCREAMING_SNAKE_CASE_ = np.asmatrix(_A)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
            _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
620
1
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any]="attention" ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) SCREAMING_SNAKE_CASE_ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=False ): """simple docstring""" if split_mlp_wi: SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] SCREAMING_SNAKE_CASE_ = (wi_a, wi_a) else: 
SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] SCREAMING_SNAKE_CASE_ = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , *, _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool , _SCREAMING_SNAKE_CASE : bool = False ): """simple docstring""" SCREAMING_SNAKE_CASE_ = traverse_util.flatten_dict(variables['target'] ) SCREAMING_SNAKE_CASE_ = {'/'.join(_SCREAMING_SNAKE_CASE ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi SCREAMING_SNAKE_CASE_ = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = collections.OrderedDict() # Shared embeddings. SCREAMING_SNAKE_CASE_ = old['token_embedder/embedding'] # Encoder. for i in range(_SCREAMING_SNAKE_CASE ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'pre_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'attention' ) SCREAMING_SNAKE_CASE_ = layer_norm SCREAMING_SNAKE_CASE_ = k.T SCREAMING_SNAKE_CASE_ = o.T SCREAMING_SNAKE_CASE_ = q.T SCREAMING_SNAKE_CASE_ = v.T # Block i, layer 1 (MLP). 
SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ = wi[0].T SCREAMING_SNAKE_CASE_ = wi[1].T else: SCREAMING_SNAKE_CASE_ = wi.T SCREAMING_SNAKE_CASE_ = wo.T if scalable_attention: # convert the rel_embedding of each layer SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' ).T SCREAMING_SNAKE_CASE_ = old['encoder/encoder_norm/scale'] if not scalable_attention: SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup( _SCREAMING_SNAKE_CASE , 0 , 'encoder' ).T SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup( _SCREAMING_SNAKE_CASE , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(_SCREAMING_SNAKE_CASE ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_self_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'self_attention' ) SCREAMING_SNAKE_CASE_ = layer_norm SCREAMING_SNAKE_CASE_ = k.T SCREAMING_SNAKE_CASE_ = o.T SCREAMING_SNAKE_CASE_ = q.T SCREAMING_SNAKE_CASE_ = v.T # Block i, layer 1 (Cross Attention). 
SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_cross_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'encoder_decoder_attention' ) SCREAMING_SNAKE_CASE_ = layer_norm SCREAMING_SNAKE_CASE_ = k.T SCREAMING_SNAKE_CASE_ = o.T SCREAMING_SNAKE_CASE_ = q.T SCREAMING_SNAKE_CASE_ = v.T # Block i, layer 2 (MLP). SCREAMING_SNAKE_CASE_ = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ = wi[0].T SCREAMING_SNAKE_CASE_ = wi[1].T else: SCREAMING_SNAKE_CASE_ = wi.T SCREAMING_SNAKE_CASE_ = wo.T if scalable_attention: # convert the rel_embedding of each layer SCREAMING_SNAKE_CASE_ = tax_relpos_bias_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' ).T SCREAMING_SNAKE_CASE_ = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: SCREAMING_SNAKE_CASE_ = old['decoder/logits_dense/kernel'].T return new def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool ): """simple docstring""" SCREAMING_SNAKE_CASE_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE_ = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE_ = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) SCREAMING_SNAKE_CASE_ = state_dict['shared.weight'] return state_dict def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = checkpoints.load_tax_checkpoint(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = convert_tax_to_pytorch( _SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=_SCREAMING_SNAKE_CASE , scalable_attention=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = make_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = MTaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: SCREAMING_SNAKE_CASE_ = UMTaEncoderModel(_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tax_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Verify that we can load the checkpoint. 
model.from_pretrained(_SCREAMING_SNAKE_CASE ) print('Done' ) if __name__ == "__main__": UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) parser.add_argument( "--scalable_attention", action="store_true", help="Whether the model uses scaled attention (umt5 model)", default=False, ) UpperCamelCase__ : Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
620
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int=7 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = None if token is not None: SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} # The id of a workflow (not of a workflow run) SCREAMING_SNAKE_CASE_ = '636036' SCREAMING_SNAKE_CASE_ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" SCREAMING_SNAKE_CASE_ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() return result["workflow_runs"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": SCREAMING_SNAKE_CASE_ = workflow_run['id'] break return workflow_run_id def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE ) if workflow_run_id is not None: SCREAMING_SNAKE_CASE_ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) for artifact_name in artifact_names: if artifact_name in artifacts_links: SCREAMING_SNAKE_CASE_ = artifacts_links[artifact_name] download_artifact( artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ): 
"""simple docstring""" get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = {} for artifact_name in artifact_names: SCREAMING_SNAKE_CASE_ = os.path.join(_SCREAMING_SNAKE_CASE , f"""{artifact_name}.zip""" ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = {} with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(_SCREAMING_SNAKE_CASE ): # read the file with z.open(_SCREAMING_SNAKE_CASE ) as f: SCREAMING_SNAKE_CASE_ = f.read().decode('UTF-8' ) return results
620
1
UpperCamelCase__ : int = [ "DownloadConfig", "DownloadManager", "DownloadMode", "StreamingDownloadManager", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
620
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Any = { "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"], "tokenization_mvp": ["MvpTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", "MvpForSequenceClassification", "MvpModel", "MvpPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] SCREAMING_SNAKE_CASE_ = True if 'large' in model_name or 'huge' in model_name else False SCREAMING_SNAKE_CASE_ = True if 'large' in model_name or 'huge' in model_name else False SCREAMING_SNAKE_CASE_ = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3] SCREAMING_SNAKE_CASE_ = [5, 5, 5, 5] elif "fl4" in model_name: SCREAMING_SNAKE_CASE_ = [4, 4, 4, 4] SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3] if "lrf" in model_name: SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3] else: SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2] if "tiny" in model_name: SCREAMING_SNAKE_CASE_ = 96 elif "small" in model_name: SCREAMING_SNAKE_CASE_ = 96 elif "base" in model_name: SCREAMING_SNAKE_CASE_ = 128 elif "large" in model_name: SCREAMING_SNAKE_CASE_ = 192 elif "xlarge" in model_name: SCREAMING_SNAKE_CASE_ = 256 elif "huge" in model_name: SCREAMING_SNAKE_CASE_ = 352 # set label information SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: SCREAMING_SNAKE_CASE_ = 'imagenet-22k-id2label.json' else: SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = 
{int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = FocalNetConfig( embed_dim=_SCREAMING_SNAKE_CASE , depths=_SCREAMING_SNAKE_CASE , focal_levels=_SCREAMING_SNAKE_CASE , focal_windows=_SCREAMING_SNAKE_CASE , use_conv_embed=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE , use_post_layernorm=_SCREAMING_SNAKE_CASE , use_layerscale=_SCREAMING_SNAKE_CASE , ) return config def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE_ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE_ = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: SCREAMING_SNAKE_CASE_ = 'encoder.' + name if "encoder.layers" in name: SCREAMING_SNAKE_CASE_ = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: SCREAMING_SNAKE_CASE_ = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: SCREAMING_SNAKE_CASE_ = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: SCREAMING_SNAKE_CASE_ = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: SCREAMING_SNAKE_CASE_ = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: SCREAMING_SNAKE_CASE_ = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": SCREAMING_SNAKE_CASE_ = 'layernorm.weight' if name == "norm.bias": SCREAMING_SNAKE_CASE_ = 'layernorm.bias' if "head" in name: SCREAMING_SNAKE_CASE_ = name.replace('head' , 'classifier' ) else: SCREAMING_SNAKE_CASE_ = 'focalnet.' 
+ name return name def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on SCREAMING_SNAKE_CASE_ = model_name_to_url[model_name] print('Checkpoint URL: ' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val SCREAMING_SNAKE_CASE_ = get_focalnet_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = FocalNetForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() # load state dict 
model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify conversion SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = BitImageProcessor( do_resize=_SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=_SCREAMING_SNAKE_CASE , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) SCREAMING_SNAKE_CASE_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) SCREAMING_SNAKE_CASE_ = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _SCREAMING_SNAKE_CASE , atol=1E-4 ) SCREAMING_SNAKE_CASE_ = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": SCREAMING_SNAKE_CASE_ = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": SCREAMING_SNAKE_CASE_ = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": SCREAMING_SNAKE_CASE_ = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": SCREAMING_SNAKE_CASE_ = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) 
print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print(f"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(f"""{model_name}""" ) processor.push_to_hub(f"""{model_name}""" ) if __name__ == "__main__": UpperCamelCase__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) UpperCamelCase__ : Dict = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
620
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils ) __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] ) __lowerCAmelCase : Tuple = ['accelerate', 'launch'] __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate' __lowerCAmelCase : List[str] = 'default_config.yaml' __lowerCAmelCase : List[Any] = config_folder / config_file __lowerCAmelCase : str = config_folder / '_default_config.yaml' __lowerCAmelCase : Optional[int] = Path('tests/test_configs' ) @classmethod def lowerCAmelCase__ ( cls): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def lowerCAmelCase__ ( cls): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy()) def lowerCAmelCase__ ( self): for config in sorted(self.test_config_path.glob('**/*.yaml')): with self.subTest(config_file=_A): execute_subprocess_async( self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy()) def lowerCAmelCase__ ( self): execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy()) class __snake_case ( unittest.TestCase ): __lowerCAmelCase : Optional[Any] = 'test-tpu' __lowerCAmelCase : str = 'us-central1-a' __lowerCAmelCase : Union[str, Any] = 'ls' __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config'] __lowerCAmelCase : Union[str, Any] = 'cd /usr/share' __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh' __lowerCAmelCase : 
Dict = 'Running gcloud compute tpus tpu-vm ssh' def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo "Hello World"', '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} 
test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=_A , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
620
1
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if number > 0: raise ValueError('input must be a negative integer' ) SCREAMING_SNAKE_CASE_ = len(bin(_SCREAMING_SNAKE_CASE )[3:] ) SCREAMING_SNAKE_CASE_ = bin(abs(_SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:] SCREAMING_SNAKE_CASE_ = ( ( '1' + '0' * (binary_number_length - len(_SCREAMING_SNAKE_CASE )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
620
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self , _A , _A=2 , _A=True , _A=False , _A=10 , _A=3 , _A=32 * 4 , _A=32 * 6 , _A=4 , _A=32 , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_auxiliary_loss SCREAMING_SNAKE_CASE_ = num_queries SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = min_size SCREAMING_SNAKE_CASE_ = max_size SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = mask_feature_size def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( _A) SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A) SCREAMING_SNAKE_CASE_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A) > 0.5 ).float() SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=_A) > 0.5).long() SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowerCAmelCase__ ( self): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , 
decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_A) , len(config.backbone_config.depths)) self.parent.assertTrue(len(_A) , len(config.backbone_config.depths)) self.parent.assertTrue(len(_A) , config.decoder_config.decoder_layers) def lowerCAmelCase__ ( self , _A , _A , _A , _A=False): with torch.no_grad(): SCREAMING_SNAKE_CASE_ = MaskFormerModel(config=_A) model.to(_A) model.eval() SCREAMING_SNAKE_CASE_ = model(pixel_values=_A , pixel_mask=_A) SCREAMING_SNAKE_CASE_ = model(_A , output_hidden_states=_A) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(_A , _A) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = MaskFormerForInstanceSegmentation(config=_A) model.to(_A) model.eval() def comm_check_on_output(_A): # let's 
still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(pixel_values=_A , pixel_mask=_A) SCREAMING_SNAKE_CASE_ = model(_A) comm_check_on_output(_A) SCREAMING_SNAKE_CASE_ = model( pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A) comm_check_on_output(_A) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowerCAmelCase : str = ( {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowerCAmelCase : Optional[Any] = False __lowerCAmelCase : Optional[Any] = False __lowerCAmelCase : Dict = False __lowerCAmelCase : Tuple = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = MaskFormerModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_A , **_A , 
output_hidden_states=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A) @unittest.skip(reason='MaskFormer does not use inputs_embeds') def lowerCAmelCase__ ( self): pass @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method') def lowerCAmelCase__ ( self): pass @unittest.skip(reason='MaskFormer is not a generative model') def lowerCAmelCase__ ( self): pass @unittest.skip(reason='MaskFormer does not use token embeddings') def lowerCAmelCase__ ( self): pass @require_torch_multi_gpu @unittest.skip( reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`') def lowerCAmelCase__ ( self): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(_A) SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A) @slow def lowerCAmelCase__ ( self): for model_name in ["facebook/maskformer-swin-small-coco"]: SCREAMING_SNAKE_CASE_ = MaskFormerModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2 SCREAMING_SNAKE_CASE_ = { 'pixel_values': torch.randn((2, 3, *size) , device=_A), 'mask_labels': torch.randn((2, 10, *size) , device=_A), 'class_labels': torch.zeros(2 , 10 , device=_A).long(), } SCREAMING_SNAKE_CASE_ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(_A) SCREAMING_SNAKE_CASE_ = 
model(**_A) self.assertTrue(outputs.loss is not None) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(_A).to(_A) SCREAMING_SNAKE_CASE_ = model(**_A , output_attentions=_A) self.assertTrue(outputs.attentions is not None) def lowerCAmelCase__ ( self): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss SCREAMING_SNAKE_CASE_ = self.all_model_classes[1] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.train() SCREAMING_SNAKE_CASE_ = model(_A , mask_labels=_A , class_labels=_A).loss loss.backward() def lowerCAmelCase__ ( self): # only MaskFormerForInstanceSegmentation has the loss SCREAMING_SNAKE_CASE_ = self.all_model_classes[1] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(_A) model.to(_A) model.train() SCREAMING_SNAKE_CASE_ = model(_A , mask_labels=_A , class_labels=_A) SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0] 
transformer_decoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_A) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCamelCase__ : Any = 1E-4 def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __snake_case ( unittest.TestCase ): @cached_property def lowerCAmelCase__ ( self): return ( MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco') if is_vision_available() else None ) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(_A) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='pt').to(_A) SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(_A , (1, 3, 800, 1088)) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**_A) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(_A) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A)) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(_A) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A)) SCREAMING_SNAKE_CASE_ = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, 
-0.4_1_1_6, 1.8_4_1_3]]).to(_A) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco') .to(_A) .eval() ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='pt').to(_A) SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(_A , (1, 3, 800, 1088)) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**_A) # masks_queries_logits SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) SCREAMING_SNAKE_CASE_ = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] SCREAMING_SNAKE_CASE_ = torch.tensor(_A).to(_A) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A)) # class_queries_logits SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)) SCREAMING_SNAKE_CASE_ = torch.tensor( [ [1.6_512E00, -5.2_572E00, -3.3_519E00], [3.6_169E-02, -5.9_025E00, -2.9_313E00], [1.0_766E-04, -7.7_630E00, -5.1_263E00], ]).to(_A) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff') .to(_A) .eval() ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = 
prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='pt').to(_A) SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(_A , (1, 3, 800, 1088)) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**_A) # masks_queries_logits SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) SCREAMING_SNAKE_CASE_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] SCREAMING_SNAKE_CASE_ = torch.tensor(_A).to(_A) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A)) # class_queries_logits SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)) SCREAMING_SNAKE_CASE_ = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(_A) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco') .to(_A) .eval() ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , ) SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_A) SCREAMING_SNAKE_CASE_ = [el.to(_A) for el in inputs['mask_labels']] SCREAMING_SNAKE_CASE_ = [el.to(_A) for el in inputs['class_labels']] with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**_A) 
self.assertTrue(outputs.loss is not None)
620
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, n_swaps=10):
    """Worker owning one list element in the odd-even transposition sort.

    Args:
        position: index of this worker's element in the original list.
        value: the element this worker holds (updated each phase).
        l_send / r_send: Pipe pairs used to SEND to the left/right neighbor
            (``None`` at the list ends).
        lr_cv / rr_cv: Pipe pairs used to RECEIVE from the left/right neighbor.
        result_pipe: Pipe pair used to report the final value to the parent.
        n_swaps: number of compare-exchange phases; after ``len(arr)`` phases
            the list is guaranteed sorted. Defaults to 10 for backward
            compatibility with the original demo of 10 elements.
    """
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, n_swaps):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            neighbor = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, neighbor)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            neighbor = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, neighbor)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` in place using one process per element.

    Spawns ``len(arr)`` workers connected to their neighbors by pipes,
    runs ``len(arr)`` compare-exchange phases, then collects the results.
    Assumes ``len(arr) >= 2``. Returns the sorted list.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made
    # outside of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], len(arr)),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], len(arr)),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                len(arr),
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort a descending list of 10 integers and print before/after."""
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
620
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    """Configuration class storing all hyperparameters of a DETA model.

    Holds the backbone sub-configuration plus the transformer
    encoder/decoder, deformable-attention, two-stage, matcher and loss
    settings. Instantiating with no arguments yields a default configuration
    with a ResNet backbone.
    """

    model_type = 'deta'
    # Map standard config attribute names onto DETA-specific ones.
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            # Accept a plain dict and convert it into the proper config class.
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias required by the shared `attribute_map` convention.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by the shared `attribute_map` convention.
        return self.d_model

    def to_dict(self):
        """Serialize this config (including the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
620
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Reference passage the question-answering tool is queried against.
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # Local and remote variants of the same tool are exercised below.
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        # Positional-argument call on the local tool.
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        # Positional-argument call on the remote tool.
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        # Keyword-argument call on the local tool.
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        # Keyword-argument call on the remote tool.
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
620
1
from typing import Any


class Node:
    """A single singly-linked-list node holding arbitrary data."""

    def __init__(self, data: Any):
        self.data = data  # payload
        self.next = None  # successor node, or None at the tail


class LinkedList:
    """Minimal singly linked list supporting push, print, and data swap."""

    def __init__(self):
        self.head = None  # first node, or None when empty

    def print_list(self) -> None:
        """Print all node values on one line, space-separated."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert a new node holding ``new_data`` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the *data* of the first nodes holding each value.

        No-op if the two values are equal or either value is absent;
        links are left untouched — only payloads move.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
620
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    """Holds the configuration knobs for `BeitImageProcessor` tests.

    NOTE(review): the original chunk mangled this class, the test class, and the
    two helper functions all to the same names (shadowing each other) while the
    call sites used the real names restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Return one ADE20k fixture image together with its segmentation map."""
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])
    return image, map


def prepare_semantic_batch_inputs():
    """Return two ADE20k fixture images together with their segmentation maps."""
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])
    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """End-to-end tests for `BeitImageProcessor` over PIL/numpy/torch inputs."""

    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                2,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        # With reduce_labels the background (0) becomes 255 (ignored index).
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
620
1
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
# NOTE(review): the original chunk assigned this Lock to a mangled name while
# the worker declared `global process_lock`; the intended binding is restored.
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one element of the parallel odd-even transposition sort.

    `l_send`/`r_send` are Pipes used to send to the left/right neighbour,
    `lr_cv`/`rr_cv` the Pipes used to receive from them (None at the ends);
    the final value is reported through `result_pipe`.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` in place using one process per element; returns the list.

    The worker performs exactly 10 phases, so lists longer than 10 elements
    are not guaranteed to come back fully sorted.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
620
def solution(pence: int = 200) -> int:
    """Return the number of ways `pence` pence can be made from UK coins.

    Project Euler problem 31.  Bottom-up dynamic programming over the coin
    denominations: O(len(coins) * pence) time, O(pence) space.

    NOTE(review): the original chunk defined this under a mangled name while
    the `__main__` assert called the undefined `solution`; restored.

    >>> solution(200)
    73682
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        # Processing coins outermost counts combinations, not permutations.
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
620
1
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class __snake_case : def __init__( self , _A = None): if components is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = list(_A) def __len__( self): return len(self.__components) def __str__( self): return "(" + ",".join(map(_A , self.__components)) + ")" def __add__( self , _A): SCREAMING_SNAKE_CASE_ = len(self) if size == len(_A): SCREAMING_SNAKE_CASE_ = [self.__components[i] + other.component(_A) for i in range(_A)] return Vector(_A) else: raise Exception('must have the same size') def __sub__( self , _A): SCREAMING_SNAKE_CASE_ = len(self) if size == len(_A): SCREAMING_SNAKE_CASE_ = [self.__components[i] - other.component(_A) for i in range(_A)] return Vector(_A) else: # error case raise Exception('must have the same size') @overload def __mul__( self , _A): ... @overload def __mul__( self , _A): ... def __mul__( self , _A): if isinstance(_A , (float, int)): SCREAMING_SNAKE_CASE_ = [c * other for c in self.__components] return Vector(_A) elif isinstance(_A , _A) and len(self) == len(_A): SCREAMING_SNAKE_CASE_ = len(self) SCREAMING_SNAKE_CASE_ = [self.__components[i] * other.component(_A) for i in range(_A)] return sum(_A) else: # error case raise Exception('invalid operand!') def lowerCAmelCase__ ( self): return Vector(self.__components) def lowerCAmelCase__ ( self , _A): if isinstance(_A , _A) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def lowerCAmelCase__ ( self , _A , _A): assert -len(self.__components) <= pos < len(self.__components) SCREAMING_SNAKE_CASE_ = value def lowerCAmelCase__ ( self): if len(self.__components) == 0: raise Exception('Vector is empty') SCREAMING_SNAKE_CASE_ = [c**2 for c in self.__components] return math.sqrt(sum(_A)) def lowerCAmelCase__ ( self , _A , _A = False): SCREAMING_SNAKE_CASE_ = self * other 
SCREAMING_SNAKE_CASE_ = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return Vector([0] * dimension ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )) SCREAMING_SNAKE_CASE_ = [0] * dimension SCREAMING_SNAKE_CASE_ = 1 return Vector(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : Vector , _SCREAMING_SNAKE_CASE : Vector ): """simple docstring""" assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (isinstance(_SCREAMING_SNAKE_CASE , (int, float) )) ) return x * scalar + y def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" random.seed(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = [random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )] return Vector(_SCREAMING_SNAKE_CASE ) class __snake_case : def __init__( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = matrix SCREAMING_SNAKE_CASE_ = w SCREAMING_SNAKE_CASE_ = h def __str__( self): SCREAMING_SNAKE_CASE_ = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__( self , _A): if self.__width == other.width() and self.__height == other.height(): SCREAMING_SNAKE_CASE_ = [] for i in range(self.__height): SCREAMING_SNAKE_CASE_ = [ self.__matrix[i][j] + other.component(_A , _A) for j in 
range(self.__width) ] matrix.append(_A) return Matrix(_A , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__( self , _A): if self.__width == other.width() and self.__height == other.height(): SCREAMING_SNAKE_CASE_ = [] for i in range(self.__height): SCREAMING_SNAKE_CASE_ = [ self.__matrix[i][j] - other.component(_A , _A) for j in range(self.__width) ] matrix.append(_A) return Matrix(_A , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__( self , _A): ... @overload def __mul__( self , _A): ... def __mul__( self , _A): if isinstance(_A , _A): # matrix-vector if len(_A) == self.__width: SCREAMING_SNAKE_CASE_ = zero_vector(self.__height) for i in range(self.__height): SCREAMING_SNAKE_CASE_ = [ self.__matrix[i][j] * other.component(_A) for j in range(self.__width) ] ans.change_component(_A , sum(_A)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(_A , (int, float)): # matrix-scalar SCREAMING_SNAKE_CASE_ = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(_A , self.__width , self.__height) return None def lowerCAmelCase__ ( self): return self.__height def lowerCAmelCase__ ( self): return self.__width def lowerCAmelCase__ ( self , _A , _A): if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def lowerCAmelCase__ ( self , _A , _A , _A): if 0 <= x < self.__height and 0 <= y < self.__width: SCREAMING_SNAKE_CASE_ = value else: raise Exception('change_component: indices out of bounds') def lowerCAmelCase__ ( self , _A , _A): if self.__height != self.__width: raise Exception('Matrix is not square') SCREAMING_SNAKE_CASE_ = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(_A)): SCREAMING_SNAKE_CASE_ = minor[i][:y] + 
minor[i][y + 1 :] return Matrix(_A , self.__width - 1 , self.__height - 1).determinant() def lowerCAmelCase__ ( self , _A , _A): if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(_A , _A) else: raise Exception('Indices out of bounds') def lowerCAmelCase__ ( self): if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: SCREAMING_SNAKE_CASE_ = [ self.__matrix[0][y] * self.cofactor(0 , _A) for y in range(self.__width) ] return sum(_A) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [[0] * n for _ in range(_SCREAMING_SNAKE_CASE )] return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" random.seed(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = [ [random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE ) ] return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
620
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Return the maximum value achievable in the 0/1 knapsack problem.

    Plain recursion over the items: at each `index` either skip the item or,
    if it fits within `max_weight`, take it.  Exponential time; intended as a
    teaching baseline for the memoized/DP versions.

    NOTE(review): the original chunk defined this under a mangled name while
    recursing on the undefined `knapsack`; restored so the recursion works.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # Option 1: leave item `index` behind.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take item `index`, if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
1
def is_balanced(s: str) -> bool:
    """Return True iff every bracket in `s` is properly matched and nested.

    Supports (), [] and {}; any non-bracket character is ignored.
    O(n) time, O(n) space for the stack.

    NOTE(review): the original chunk mangled both this function and `main` to
    the same name (so the second shadowed the first) while the call sites used
    the real names restored here.
    """
    stack = []
    open_brackets = set('([{')
    closed_brackets = set(')]}')
    open_to_closed = {'{': '}', '[': ']', '(': ')'}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (
            # Closing with an empty stack, or closing the wrong opener,
            # makes the string unbalanced.  Note the pop happens here.
            not stack or open_to_closed[stack.pop()] != ch
        ):
            return False

    # Balanced only if every opener was consumed.
    return not stack


def main() -> None:
    s = input('Enter sequence of brackets: ')
    if is_balanced(s):
        print(s, 'is balanced')
    else:
        print(s, 'is not balanced')


if __name__ == "__main__":
    main()
620
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Return the standard COCO cats image used to sanity-check conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for each released checkpoint."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Map original SwiftFormer state-dict keys to their HF-model names."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to the HF format and verify it.

    NOTE(review): the original chunk defined all five functions in this file
    under one mangled name (so only the last survived) while the call sites
    used the real names restored here.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
1
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
def _UpperCAmelCase ( ): """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= n: SCREAMING_SNAKE_CASE_ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _UpperCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 500 ) if __name__ == "__main__": print(solution())
620
1
import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("Googling.....") UpperCamelCase__ : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) UpperCamelCase__ : Optional[Any] = requests.get(url, headers={"UserAgent": UserAgent().random}) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) UpperCamelCase__ : Any = BeautifulSoup(res.text, "html.parser") UpperCamelCase__ : Union[str, Any] = list(soup.select(".eZt8xd"))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("href")) else: webbrowser.open(F'https://google.com{link.get("href")}')
620
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
1
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) UpperCamelCase__ : Any = logging.getLogger(__name__) @dataclass(frozen=lowerCAmelCase__ ) class __snake_case : __lowerCAmelCase : str __lowerCAmelCase : str __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None @dataclass(frozen=lowerCAmelCase__ ) class __snake_case : __lowerCAmelCase : List[int] __lowerCAmelCase : Optional[List[int]] = None __lowerCAmelCase : Optional[List[int]] = None __lowerCAmelCase : Optional[Union[int, float]] = None __lowerCAmelCase : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : List[InputFeatures] def __init__( self , _A , _A , _A , _A = None , _A=False , _A = False , ): SCREAMING_SNAKE_CASE_ = hans_processors[task]() SCREAMING_SNAKE_CASE_ = os.path.join( _A , 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_A) , _A , ) , ) SCREAMING_SNAKE_CASE_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_list[2], label_list[1] SCREAMING_SNAKE_CASE_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
SCREAMING_SNAKE_CASE_ = cached_features_file + '.lock' with FileLock(_A): if os.path.exists(_A) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""") SCREAMING_SNAKE_CASE_ = torch.load(_A) else: logger.info(f"""Creating features from dataset file at {data_dir}""") SCREAMING_SNAKE_CASE_ = ( processor.get_dev_examples(_A) if evaluate else processor.get_train_examples(_A) ) logger.info('Training examples: %s' , len(_A)) SCREAMING_SNAKE_CASE_ = hans_convert_examples_to_features(_A , _A , _A , _A) logger.info('Saving features into cached file %s' , _A) torch.save(self.features , _A) def __len__( self): return len(self.features) def __getitem__( self , _A): return self.features[i] def lowerCAmelCase__ ( self): return self.label_list if is_tf_available(): import tensorflow as tf class __snake_case : __lowerCAmelCase : List[InputFeatures] def __init__( self , _A , _A , _A , _A = 128 , _A=False , _A = False , ): SCREAMING_SNAKE_CASE_ = hans_processors[task]() SCREAMING_SNAKE_CASE_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_list[2], label_list[1] SCREAMING_SNAKE_CASE_ = label_list SCREAMING_SNAKE_CASE_ = processor.get_dev_examples(_A) if evaluate else processor.get_train_examples(_A) SCREAMING_SNAKE_CASE_ = hans_convert_examples_to_features(_A , _A , _A , _A) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features) , desc='convert examples to features'): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(_A))) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) SCREAMING_SNAKE_CASE_ = tf.data.Dataset.from_generator( _A , ( { 'example_id': tf.intaa, 'input_ids': tf.intaa, 
'attention_mask': tf.intaa, 'token_type_ids': tf.intaa, }, tf.intaa, ) , ( { 'example_id': tf.TensorShape([]), 'input_ids': tf.TensorShape([None, None]), 'attention_mask': tf.TensorShape([None, None]), 'token_type_ids': tf.TensorShape([None, None]), }, tf.TensorShape([]), ) , ) def lowerCAmelCase__ ( self): return self.dataset def __len__( self): return len(self.features) def __getitem__( self , _A): return self.features[i] def lowerCAmelCase__ ( self): return self.label_list class __snake_case ( lowerCAmelCase__ ): def lowerCAmelCase__ ( self , _A): return self._create_examples(self._read_tsv(os.path.join(_A , 'heuristics_train_set.txt')) , 'train') def lowerCAmelCase__ ( self , _A): return self._create_examples(self._read_tsv(os.path.join(_A , 'heuristics_evaluation_set.txt')) , 'dev') def lowerCAmelCase__ ( self): return ["contradiction", "entailment", "neutral"] def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = [] for i, line in enumerate(_A): if i == 0: continue SCREAMING_SNAKE_CASE_ = '%s-%s' % (set_type, line[0]) SCREAMING_SNAKE_CASE_ = line[5] SCREAMING_SNAKE_CASE_ = line[6] SCREAMING_SNAKE_CASE_ = line[7][2:] if line[7].startswith('ex') else line[7] SCREAMING_SNAKE_CASE_ = line[0] examples.append(InputExample(guid=_A , text_a=_A , text_b=_A , label=_A , pairID=_A)) return examples def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[InputExample] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : PreTrainedTokenizer , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = [] for ex_index, example in tqdm.tqdm(enumerate(_SCREAMING_SNAKE_CASE ) , desc='convert examples to features' ): if ex_index % 10_000 == 0: logger.info('Writing example %d' % (ex_index) ) SCREAMING_SNAKE_CASE_ = tokenizer( example.text_a , example.text_b , add_special_tokens=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , 
truncation=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = label_map[example.label] if example.label in label_map else 0 SCREAMING_SNAKE_CASE_ = int(example.pairID ) features.append(InputFeatures(**_SCREAMING_SNAKE_CASE , label=_SCREAMING_SNAKE_CASE , pairID=_SCREAMING_SNAKE_CASE ) ) for i, example in enumerate(examples[:5] ): logger.info('*** Example ***' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features UpperCamelCase__ : Any = { "hans": 3, } UpperCamelCase__ : List[Any] = { "hans": HansProcessor, }
620
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if 
self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
620
1
# Lazy-import package initializer for the ELECTRA model family
# (configuration, tokenizers, and PyTorch / TensorFlow / Flax model classes).
#
# NOTE(review): identifiers in this file have been machine-mangled. The name
# `UpperCamelCase__` is re-assigned for every sub-list (originally these were
# entries of one `_import_structure` dict), the `List`/`Any`/`Union`/`Optional`
# used in annotations are never imported, and `_import_structure` passed to
# `_LazyModule` at the bottom is undefined here — the module is not importable
# as-is. Code preserved verbatim; comments describe the apparent intent.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-importable members: configuration and the slow (Python) tokenizer.
UpperCamelCase__ : List[Any] = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

# Fast tokenizer is only registered when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : int = ["ElectraTokenizerFast"]

# PyTorch model classes, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Union[str, Any] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

# TensorFlow model classes, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : List[Any] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

# Flax model classes, only when JAX/Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Any = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

# Static imports for type checkers only; at runtime the lazy module proxies them.
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy framework imports
    # are deferred until an attribute is actually accessed.
    UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
# VQ-VAE style autoencoder (Encoder -> VectorQuantizer -> Decoder), in the
# shape of diffusers' `VQModel`.
#
# NOTE(review): this file has been machine-mangled: every parameter of every
# method is named `_A` (duplicate argument names are a SyntaxError in Python),
# every assignment targets the placeholder `SCREAMING_SNAKE_CASE_` while the
# bodies read the original names, and several referenced names
# (`lowerCAmelCase__`, `nn.Convad` — presumably `nn.Conv2d`, `vq_embed_dim`,
# `latent_channels`, `return_dict`, `h`, `quant`, `dec`, `VQEncoderOutput`)
# are undefined. Code preserved verbatim; comments describe apparent intent
# only — confirm against upstream before relying on any of it.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class __snake_case(lowerCAmelCase__):
    # Output wrapper for `encode` — holds the pre-quantization latents.
    __lowerCAmelCase : torch.FloatTensor


class __snake_case(lowerCAmelCase__, lowerCAmelCase__):
    # The model itself; the two bases are presumably ModelMixin + ConfigMixin.

    @register_to_config
    def __init__(
        self,
        _A=3,                        # in_channels? — TODO confirm mapping
        _A=3,                        # out_channels?
        _A=("DownEncoderBlock2D",),  # down_block_types?
        _A=("UpDecoderBlock2D",),    # up_block_types?
        _A=(64,),                    # block_out_channels?
        _A=1,                        # layers_per_block?
        _A="silu",                   # act_fn?
        _A=3,                        # latent_channels?
        _A=32,                       # sample_size?
        _A=256,                      # num_vq_embeddings?
        _A=32,                       # norm_num_groups?
        _A=None,                     # vq_embed_dim?
        _A=0.1_8_2_1_5,              # scaling_factor?
        _A="group",                  # norm_type?
    ):
        super().__init__()
        # pass init params to Encoder
        SCREAMING_SNAKE_CASE_ = Encoder(
            in_channels=_A,
            out_channels=_A,
            down_block_types=_A,
            block_out_channels=_A,
            layers_per_block=_A,
            act_fn=_A,
            norm_num_groups=_A,
            double_z=_A,
        )
        # Quantizer embedding dim defaults to the latent channel count.
        SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 convolutions into and out of the vector quantizer.
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A, _A, 1)
        SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A, _A, beta=0.2_5, remap=_A, sane_index_shape=_A)
        SCREAMING_SNAKE_CASE_ = nn.Convad(_A, _A, 1)
        # pass init params to Decoder
        SCREAMING_SNAKE_CASE_ = Decoder(
            in_channels=_A,
            out_channels=_A,
            up_block_types=_A,
            block_out_channels=_A,
            layers_per_block=_A,
            act_fn=_A,
            norm_num_groups=_A,
            norm_type=_A,
        )

    @apply_forward_hook
    def lowerCAmelCase__(self, _A, _A=True):
        """Encode a sample batch to (pre-quantization) latents."""
        SCREAMING_SNAKE_CASE_ = self.encoder(_A)
        SCREAMING_SNAKE_CASE_ = self.quant_conv(_A)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=_A)

    @apply_forward_hook
    def lowerCAmelCase__(self, _A, _A=False, _A=True):
        """Quantize latents (unless told not to) and decode them to a sample."""
        # also go through quantization layer
        if not force_not_quantize:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = self.quantize(_A)
        else:
            SCREAMING_SNAKE_CASE_ = h
        SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A)
        # The 'spatial' norm decoder variant also consumes the quantized latents.
        SCREAMING_SNAKE_CASE_ = self.decoder(_A, quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)

    def lowerCAmelCase__(self, _A, _A=True):
        """Full forward pass: encode, then quantize + decode the latents."""
        SCREAMING_SNAKE_CASE_ = sample
        SCREAMING_SNAKE_CASE_ = self.encode(_A).latents
        SCREAMING_SNAKE_CASE_ = self.decode(_A).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_A)
620
1
# SentencePiece-based tokenizer module in the shape of `RemBertTokenizer`.
#
# NOTE(review): machine-mangled file — methods have duplicate `_A` parameter
# names (a SyntaxError), all module constants share the name `UpperCamelCase__`,
# assignments target `SCREAMING_SNAKE_CASE_` while bodies read the original
# names (`do_lower_case`, `vocab`, `state`, `pieces`, ...), and
# `lowerCAmelCase__` / `VOCAB_FILES_NAMES` / `PRETRAINED_VOCAB_FILES_MAP` /
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` are undefined. Code preserved
# verbatim; comments describe apparent intent only.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

UpperCamelCase__ : List[str] = logging.get_logger(__name__)
# Vocab filename, pretrained-file map, and positional-embedding sizes
# (originally three distinct module constants).
UpperCamelCase__ : Optional[int] = {"vocab_file": "sentencepiece.model"}
UpperCamelCase__ : Tuple = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
UpperCamelCase__ : Any = {
    "google/rembert": 256,
}


class __snake_case(lowerCAmelCase__):
    # Class-level tokenizer metadata (originally three distinctly-named attrs).
    __lowerCAmelCase : Dict = VOCAB_FILES_NAMES
    __lowerCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        _A,            # vocab_file? — TODO confirm mapping
        _A=False,      # do_lower_case?
        _A=True,       # remove_space?
        _A=True,       # keep_accents?
        _A="[CLS]",    # bos_token?
        _A="[SEP]",    # eos_token?
        _A="[UNK]",    # unk_token?
        _A="[SEP]",    # sep_token?
        _A="[PAD]",    # pad_token?
        _A="[CLS]",    # cls_token?
        _A="[MASK]",   # mask_token?
        **_A,
    ):
        super().__init__(
            do_lower_case=_A,
            remove_space=_A,
            keep_accents=_A,
            bos_token=_A,
            eos_token=_A,
            unk_token=_A,
            sep_token=_A,
            pad_token=_A,
            cls_token=_A,
            mask_token=_A,
            **_A,
        )
        SCREAMING_SNAKE_CASE_ = do_lower_case
        SCREAMING_SNAKE_CASE_ = remove_space
        SCREAMING_SNAKE_CASE_ = keep_accents
        SCREAMING_SNAKE_CASE_ = vocab_file
        # Eagerly load the SentencePiece model from the vocab file.
        SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor()
        self.sp_model.Load(_A)

    @property
    def lowerCAmelCase__(self):
        """Vocabulary size: number of pieces in the SentencePiece model."""
        return len(self.sp_model)

    def lowerCAmelCase__(self):
        """Return the token -> id mapping, including added tokens."""
        SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it from state.
        SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
        SCREAMING_SNAKE_CASE_ = None
        return state

    def __setstate__(self, _A):
        # Restore state, then rebuild the processor from the saved vocab file.
        SCREAMING_SNAKE_CASE_ = d
        SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def lowerCAmelCase__(self, _A, _A=False):
        """Tokenize text into SentencePiece pieces."""
        SCREAMING_SNAKE_CASE_ = self.sp_model.EncodeAsPieces(_A)
        return pieces

    def lowerCAmelCase__(self, _A):
        """Convert a token (piece) to its vocabulary id."""
        return self.sp_model.PieceToId(_A)

    def lowerCAmelCase__(self, _A):
        """Convert a vocabulary id back to its token (piece)."""
        return self.sp_model.IdToPiece(_A)

    def lowerCAmelCase__(self, _A):
        """Join a list of pieces back into a plain string."""
        SCREAMING_SNAKE_CASE_ = self.sp_model.decode_pieces(_A)
        return out_string

    def lowerCAmelCase__(self, _A, _A=None):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowerCAmelCase__(self, _A, _A=None, _A=False):
        """Mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1]
        return [1] + ([0] * len(_A)) + [1]

    def lowerCAmelCase__(self, _A, _A=None):
        """Token type ids: 0 for the first segment, 1 for the second."""
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def lowerCAmelCase__(self, _A, _A=None):
        """Copy the SentencePiece model file into the given save directory."""
        if not os.path.isdir(_A):
            # NOTE(review): logs and returns None here instead of raising or
            # returning a tuple — callers expecting a tuple will break.
            logger.error('Vocabulary path ({}) should be a directory'.format(_A))
            return
        SCREAMING_SNAKE_CASE_ = os.path.join(
            _A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(_A):
            copyfile(self.vocab_file, _A)
        return (out_vocab_file,)
620
# bitsandbytes (bnb) 8-bit / 4-bit quantization utilities, in the shape of
# accelerate's `utils/bnb.py` (`load_and_quantize_model` and helpers).
#
# NOTE(review): machine-mangled file. All top-level functions share the name
# `_UpperCAmelCase` (later defs overwrite earlier ones), every parameter is
# `_SCREAMING_SNAKE_CASE` (duplicate argument names are a SyntaxError), every
# assignment targets `SCREAMING_SNAKE_CASE_` while bodies read original names,
# and the import list repeats `is_abit_bnb_available` (presumably originally
# the distinct 4-bit and 8-bit checks). Helper names called below
# (`get_keys_to_not_convert`, `replace_with_bnb_layers`, ...) are the original
# un-mangled names and are undefined in this file as written. Code preserved
# verbatim; comments describe apparent intent only.
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)

if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy

UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: torch.nn.Module,
    _SCREAMING_SNAKE_CASE: BnbQuantizationConfig,
    _SCREAMING_SNAKE_CASE: Union[str, os.PathLike] = None,
    _SCREAMING_SNAKE_CASE: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    _SCREAMING_SNAKE_CASE: Optional[List[str]] = None,
    _SCREAMING_SNAKE_CASE: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    _SCREAMING_SNAKE_CASE: Optional[Union[str, os.PathLike]] = None,
    _SCREAMING_SNAKE_CASE: bool = False,
):
    """Quantize a model with bitsandbytes and dispatch it across devices.

    Presumably `load_and_quantize_model(model, bnb_quantization_config,
    weights_location, device_map, no_split_module_classes, max_memory,
    offload_folder, offload_state_dict)` — TODO confirm parameter mapping.
    """
    # NOTE(review): both assignments read `load_in_abit`; originally these
    # were the separate 8-bit and 4-bit flags on the config.
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.'
        )
    SCREAMING_SNAKE_CASE_ = []
    # custom device map
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) and len(device_map.keys()) > 1:
        SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE)
    # compatibility with peft
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = load_in_abit
    SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.'
        )
        SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, modules_to_not_convert=_SCREAMING_SNAKE_CASE)
        # convert param to the right dtype
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
                param.to(torch.floataa)
                if param.dtype != torch.floataa:
                    # Re-fetch the parameter by attribute name and retry the cast.
                    SCREAMING_SNAKE_CASE_ = name.replace('.weight', '').replace('.bias', '')
                    SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                    if param is not None:
                        param.to(torch.floataa)
            elif torch.is_floating_point(_SCREAMING_SNAKE_CASE):
                param.to(_SCREAMING_SNAKE_CASE)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.'
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """
        )
    else:
        # Meta-device model: swap in bnb layers, infer a device map, then load
        # the checkpoint shard-by-shard and dispatch.
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, modules_to_not_convert=_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map(
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            max_memory=_SCREAMING_SNAKE_CASE,
            no_split_module_classes=_SCREAMING_SNAKE_CASE,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=_SCREAMING_SNAKE_CASE,
            offload_state_dict=_SCREAMING_SNAKE_CASE,
            keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules,
            offload_abit_bnb=load_in_abit and offload,
        )
        return dispatch_model(_SCREAMING_SNAKE_CASE, device_map=_SCREAMING_SNAKE_CASE, offload_dir=_SCREAMING_SNAKE_CASE)


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: Tuple,
    _SCREAMING_SNAKE_CASE: Optional[Any],
    _SCREAMING_SNAKE_CASE: List[str] = None,
    _SCREAMING_SNAKE_CASE: List[str] = None,
    _SCREAMING_SNAKE_CASE: Union[str, Any] = None,
):
    """Compute (or validate) a device map for the quantized model.

    Presumably `get_quantized_model_device_map(model, bnb_quantization_config,
    device_map, max_memory, no_split_module_classes)`.
    """
    if device_map is None:
        if torch.cuda.is_available():
            SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.'
            )
        # Skipped modules and fp32-kept modules retain their special dtypes.
        SCREAMING_SNAKE_CASE_ = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
            }
        )
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = special_dtypes
        SCREAMING_SNAKE_CASE_ = no_split_module_classes
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            SCREAMING_SNAKE_CASE_ = get_balanced_memory(
                _SCREAMING_SNAKE_CASE,
                low_zero=(device_map == 'balanced_low_0'),
                max_memory=_SCREAMING_SNAKE_CASE,
                **_SCREAMING_SNAKE_CASE,
            )
        SCREAMING_SNAKE_CASE_ = max_memory
        SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        # check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        SCREAMING_SNAKE_CASE_ = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '\n                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                        these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                        `load_and_quantize_model`. Check\n                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                        for more details.\n                        '
                    )
                else:
                    logger.info(
                        'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit'
                    )
        del device_map_without_some_modules
    return device_map


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: int, _SCREAMING_SNAKE_CASE: int = None, _SCREAMING_SNAKE_CASE: Union[str, Any] = None):
    """Replace `nn.Linear` modules with bnb layers; warn if nothing matched.

    Presumably `replace_with_bnb_layers(model, bnb_quantization_config,
    modules_to_not_convert, current_key_name)`.
    """
    if modules_to_not_convert is None:
        SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.'
        )
    return model


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: List[str],
    _SCREAMING_SNAKE_CASE: Tuple,
    _SCREAMING_SNAKE_CASE: Optional[Any] = None,
    _SCREAMING_SNAKE_CASE: str = None,
):
    """Recursive worker for the bnb-layer replacement (`_replace_with_bnb_layers`)."""
    SCREAMING_SNAKE_CASE_ = False
    for name, module in model.named_children():
        if current_key_name is None:
            SCREAMING_SNAKE_CASE_ = []
        current_key_name.append(_SCREAMING_SNAKE_CASE)
        if isinstance(_SCREAMING_SNAKE_CASE, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = True
            for key in modules_to_not_convert:
                if ((key in current_key_name_str) and (key + "." in current_key_name_str)) or key == current_key_name_str:
                    SCREAMING_SNAKE_CASE_ = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    # 8-bit path — "LinearabitLt" is presumably `Linear8bitLt`.
                    SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fpaa_weights=_SCREAMING_SNAKE_CASE,
                        threshold=bnb_quantization_config.llm_inta_threshold,
                    )
                elif bnb_quantization_config.load_in_abit:
                    # 4-bit path — "Linearabit" is presumably `Linear4bit`.
                    SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_abit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_abit_quant_type,
                    )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                SCREAMING_SNAKE_CASE_ = module.weight.data
                if module.bias is not None:
                    SCREAMING_SNAKE_CASE_ = module.bias.data
                bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE)
                setattr(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                SCREAMING_SNAKE_CASE_ = True
        if len(list(module.children())) > 0:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
            SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Union[str, Any]):
    """Return module names that should stay unquantized (`get_keys_to_not_convert`):

    the last module (usually the head) plus anything tied to it.
    """
    with init_empty_weights():
        SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE)
    # For compatibility with Accelerate < 0.18
    if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE, [])
    SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE) > 0
    # Check if it is a base model
    SCREAMING_SNAKE_CASE_ = False
    if hasattr(_SCREAMING_SNAKE_CASE, 'base_model_prefix'):
        SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    SCREAMING_SNAKE_CASE_ = list(model.named_children())
    SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
    # add last module together with tied weights
    SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE) - set(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE)) + list(_SCREAMING_SNAKE_CASE)
    # remove ".weight" from the keys
    SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
    SCREAMING_SNAKE_CASE_ = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE, '')
        filtered_module_names.append(_SCREAMING_SNAKE_CASE)
    return filtered_module_names


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: Dict):
    """True if the model contains any bnb 4-bit linear layer."""
    for m in model.modules():
        if isinstance(_SCREAMING_SNAKE_CASE, bnb.nn.Linearabit):
            return True
    return False


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: nn.Module):
    """Device of the model's first parameter."""
    return next(parameter.parameters()).device


def _UpperCAmelCase(
    _SCREAMING_SNAKE_CASE: str,
    _SCREAMING_SNAKE_CASE: Tuple,
    _SCREAMING_SNAKE_CASE: List[str],
    _SCREAMING_SNAKE_CASE: str,
    _SCREAMING_SNAKE_CASE: List[str],
    _SCREAMING_SNAKE_CASE: str,
    _SCREAMING_SNAKE_CASE: str,
):
    """Quantize one parameter on-device, then offload it (and its SCB stats).

    Presumably `quantize_and_offload_8bit(model, param, param_name, new_dtype,
    offload_folder, offload_index, fp16_statistics)`.
    """
    if fpaa_statistics is None:
        set_module_tensor_to_device(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, 0, dtype=_SCREAMING_SNAKE_CASE, value=_SCREAMING_SNAKE_CASE)
        SCREAMING_SNAKE_CASE_ = param_name
        SCREAMING_SNAKE_CASE_ = model
        if "." in tensor_name:
            # Walk the dotted path to the owning submodule.
            SCREAMING_SNAKE_CASE_ = tensor_name.split('.')
            for split in splits[:-1]:
                SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                SCREAMING_SNAKE_CASE_ = new_module
            SCREAMING_SNAKE_CASE_ = splits[-1]
        # offload weights
        SCREAMING_SNAKE_CASE_ = False
        offload_weight(module._parameters[tensor_name], _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, index=_SCREAMING_SNAKE_CASE)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace('weight', 'SCB'),
                _SCREAMING_SNAKE_CASE,
                index=_SCREAMING_SNAKE_CASE,
            )
    else:
        # Pre-computed fp16 statistics: offload both tensors directly.
        offload_weight(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, index=_SCREAMING_SNAKE_CASE)
        offload_weight(_SCREAMING_SNAKE_CASE, param_name.replace('weight', 'SCB'), _SCREAMING_SNAKE_CASE, index=_SCREAMING_SNAKE_CASE)

    # Leave a meta-device placeholder of the right shape in the model.
    set_module_tensor_to_device(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, 'meta', dtype=_SCREAMING_SNAKE_CASE, value=torch.empty(*param.size()))
620
1
# Image processor with optional Tesseract OCR, in the shape of a LayoutLM-style
# `ImageProcessor` (resize + channel flip + OCR words/boxes).
#
# NOTE(review): machine-mangled file — duplicate `_A` / `_SCREAMING_SNAKE_CASE`
# parameter names (a SyntaxError), assignments to `SCREAMING_SNAKE_CASE_` while
# bodies read the original names (`box`, `width`, `height`, `words`, ...), and
# `Tuple` / `List` used in annotations are not imported. Code preserved
# verbatim; comments describe apparent intent only.
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends

if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: List[str], _SCREAMING_SNAKE_CASE: Tuple, _SCREAMING_SNAKE_CASE: int):
    """Normalize a pixel box to the 0-1000 coordinate space.

    Presumably `normalize_box(box, width, height)` — the body reads those
    original names, which the mangled signature no longer binds.
    """
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


def _UpperCAmelCase(_SCREAMING_SNAKE_CASE: np.ndarray, _SCREAMING_SNAKE_CASE: Optional[str], _SCREAMING_SNAKE_CASE: Optional[str] = None):
    """Run Tesseract OCR on one image; return (words, normalized boxes).

    Presumably `apply_tesseract(image, lang, tesseract_config)`.
    """
    SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    SCREAMING_SNAKE_CASE_ = to_pil_image(_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = pil_image.size
    SCREAMING_SNAKE_CASE_ = pytesseract.image_to_data(_SCREAMING_SNAKE_CASE, lang=_SCREAMING_SNAKE_CASE, output_type='dict', config=_SCREAMING_SNAKE_CASE)
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    SCREAMING_SNAKE_CASE_ = [idx for idx, word in enumerate(_SCREAMING_SNAKE_CASE) if not word.strip()]
    SCREAMING_SNAKE_CASE_ = [word for idx, word in enumerate(_SCREAMING_SNAKE_CASE) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE) if idx not in irrelevant_indices]
    SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    SCREAMING_SNAKE_CASE_ = []
    for x, y, w, h in zip(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
        SCREAMING_SNAKE_CASE_ = [x, y, x + w, y + h]
        actual_boxes.append(_SCREAMING_SNAKE_CASE)
    # finally, normalize the bounding boxes
    SCREAMING_SNAKE_CASE_ = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE))
    assert len(_SCREAMING_SNAKE_CASE) == len(_SCREAMING_SNAKE_CASE), "Not as many words as there are bounding boxes"
    return words, normalized_boxes


class __snake_case(lowerCAmelCase__):
    # Image processor exposing only pixel_values (plus OCR words/boxes).
    __lowerCAmelCase : str = ['pixel_values']

    def __init__(
        self,
        _A=True,                          # do_resize?
        _A=None,                          # size?
        _A=PILImageResampling.BILINEAR,   # resample?
        _A=True,                          # apply_ocr?
        _A=None,                          # ocr_lang?
        _A="",                            # tesseract_config?
        **_A,
    ):
        super().__init__(**_A)
        SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 224, 'width': 224}
        SCREAMING_SNAKE_CASE_ = get_size_dict(_A)
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = resample
        SCREAMING_SNAKE_CASE_ = apply_ocr
        SCREAMING_SNAKE_CASE_ = ocr_lang
        SCREAMING_SNAKE_CASE_ = tesseract_config

    def lowerCAmelCase__(self, _A, _A, _A=PILImageResampling.BILINEAR, _A=None, **_A):
        """Resize an image to `size` ({'height': ..., 'width': ...})."""
        SCREAMING_SNAKE_CASE_ = get_size_dict(_A)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        SCREAMING_SNAKE_CASE_ = (size['height'], size['width'])
        return resize(_A, size=_A, resample=_A, data_format=_A, **_A)

    def lowerCAmelCase__(
        self,
        _A,                           # images?
        _A=None,                      # do_resize?
        _A=None,                      # size?
        _A=None,                      # resample?
        _A=None,                      # apply_ocr?
        _A=None,                      # ocr_lang?
        _A=None,                      # tesseract_config?
        _A=None,                      # return_tensors?
        _A=ChannelDimension.FIRST,    # data_format?
        **_A,
    ):
        """Preprocess a batch: optional OCR, resize, RGB->BGR flip, channel format."""
        SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ = get_size_dict(_A)
        SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ = apply_ocr if apply_ocr is not None else self.apply_ocr
        SCREAMING_SNAKE_CASE_ = ocr_lang if ocr_lang is not None else self.ocr_lang
        SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else self.tesseract_config
        SCREAMING_SNAKE_CASE_ = make_list_of_images(_A)
        if not valid_images(_A):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ = [to_numpy_array(_A) for image in images]
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = []
            for image in images:
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = apply_tesseract(_A, _A, _A)
                words_batch.append(_A)
                boxes_batch.append(_A)
        if do_resize:
            SCREAMING_SNAKE_CASE_ = [self.resize(image=_A, size=_A, resample=_A) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        SCREAMING_SNAKE_CASE_ = [flip_channel_order(_A) for image in images]
        SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_A, _A) for image in images]
        SCREAMING_SNAKE_CASE_ = BatchFeature(data={'pixel_values': images}, tensor_type=_A)
        if apply_ocr:
            # Attach OCR results to the feature dict (originally data['words'] / data['boxes']).
            SCREAMING_SNAKE_CASE_ = words_batch
            SCREAMING_SNAKE_CASE_ = boxes_batch
        return data
620
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } UpperCamelCase__ : str = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ = bs[:] SCREAMING_SNAKE_CASE_ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ = char return pairs class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ = bytes_to_unicode() SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='utf-8') as merges_handle: SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1] SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A)))) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def lowerCAmelCase__ ( self): return len(self.encoder) def lowerCAmelCase__ ( self): return dict(self.encoder , **self.added_tokens_encoder) def lowerCAmelCase__ ( self , _A): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = get_pairs(_A) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf'))) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 while i < len(_A): try: SCREAMING_SNAKE_CASE_ = word.index(_A , _A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) SCREAMING_SNAKE_CASE_ = j if word[i] == first and i < len(_A) - 1 and word[i + 1] == second: 
new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = new_word if len(_A) == 1: break else: SCREAMING_SNAKE_CASE_ = get_pairs(_A) SCREAMING_SNAKE_CASE_ = ' '.join(_A) SCREAMING_SNAKE_CASE_ = word return word def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for token in re.findall(self.pat , _A): SCREAMING_SNAKE_CASE_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' ')) return bpe_tokens def lowerCAmelCase__ ( self , _A): return self.encoder.get(_A , self.encoder.get(self.unk_token)) def lowerCAmelCase__ ( self , _A): return self.decoder.get(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n') SCREAMING_SNAKE_CASE_ = 0 with open(_A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!') SCREAMING_SNAKE_CASE_ = token_index writer.write(' '.join(_A) + 
'\n') index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCAmelCase__ ( self , _A , _A=False , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ = ' ' + text return (text, kwargs)
620
1
from __future__ import annotations import time UpperCamelCase__ : Optional[int] = list[tuple[int, int]] UpperCamelCase__ : str = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] UpperCamelCase__ : Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class __snake_case : def __init__( self , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = pos_x SCREAMING_SNAKE_CASE_ = pos_y SCREAMING_SNAKE_CASE_ = (pos_y, pos_x) SCREAMING_SNAKE_CASE_ = goal_x SCREAMING_SNAKE_CASE_ = goal_y SCREAMING_SNAKE_CASE_ = parent class __snake_case : def __init__( self , _A , _A): SCREAMING_SNAKE_CASE_ = Node(start[1] , start[0] , goal[1] , goal[0] , _A) SCREAMING_SNAKE_CASE_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , _A) SCREAMING_SNAKE_CASE_ = [self.start] SCREAMING_SNAKE_CASE_ = False def lowerCAmelCase__ ( self): while self.node_queue: SCREAMING_SNAKE_CASE_ = self.node_queue.pop(0) if current_node.pos == self.target.pos: SCREAMING_SNAKE_CASE_ = True return self.retrace_path(_A) SCREAMING_SNAKE_CASE_ = self.get_successors(_A) for node in successors: self.node_queue.append(_A) if not self.reached: return [self.start.pos] return None def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for action in delta: SCREAMING_SNAKE_CASE_ = parent.pos_x + action[1] SCREAMING_SNAKE_CASE_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_A) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(_A , _A , self.target.pos_y , self.target.pos_x , _A)) return successors def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = node SCREAMING_SNAKE_CASE_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) SCREAMING_SNAKE_CASE_ = current_node.parent path.reverse() return path class __snake_case : def __init__( self , _A 
, _A): SCREAMING_SNAKE_CASE_ = BreadthFirstSearch(_A , _A) SCREAMING_SNAKE_CASE_ = BreadthFirstSearch(_A , _A) SCREAMING_SNAKE_CASE_ = False def lowerCAmelCase__ ( self): while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: SCREAMING_SNAKE_CASE_ = self.fwd_bfs.node_queue.pop(0) SCREAMING_SNAKE_CASE_ = self.bwd_bfs.node_queue.pop(0) if current_bwd_node.pos == current_fwd_node.pos: SCREAMING_SNAKE_CASE_ = True return self.retrace_bidirectional_path( _A , _A) SCREAMING_SNAKE_CASE_ = current_bwd_node SCREAMING_SNAKE_CASE_ = current_fwd_node SCREAMING_SNAKE_CASE_ = { self.fwd_bfs: self.fwd_bfs.get_successors(_A), self.bwd_bfs: self.bwd_bfs.get_successors(_A), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(_A) if not self.reached: return [self.fwd_bfs.start.pos] return None def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = self.fwd_bfs.retrace_path(_A) SCREAMING_SNAKE_CASE_ = self.bwd_bfs.retrace_path(_A) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE_ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() UpperCamelCase__ : Tuple = (0, 0) UpperCamelCase__ : Tuple = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) UpperCamelCase__ : List[Any] = time.time() UpperCamelCase__ : Tuple = BreadthFirstSearch(init, goal) UpperCamelCase__ : Any = bfs.search() UpperCamelCase__ : str = time.time() - start_bfs_time print("Unidirectional BFS computation time : ", bfs_time) UpperCamelCase__ : List[str] = time.time() UpperCamelCase__ : str = BidirectionalBreadthFirstSearch(init, goal) UpperCamelCase__ : Dict = bd_bfs.search() UpperCamelCase__ : Union[str, Any] = time.time() - start_bd_bfs_time print("Bidirectional BFS computation time : ", bd_bfs_time)
620
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'dpr' def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ): super().__init__(pad_token_id=_A , **_A) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = position_embedding_type
620
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[Any] = ['image_processor', 'tokenizer'] __lowerCAmelCase : List[str] = 'AutoImageProcessor' __lowerCAmelCase : List[Any] = 'AutoTokenizer' def __init__( self , _A , _A): super().__init__(_A , _A) SCREAMING_SNAKE_CASE_ = self.image_processor def __call__( self , _A=None , _A=None , _A=None , **_A): if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: SCREAMING_SNAKE_CASE_ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def lowerCAmelCase__ ( self , *_A , **_A): return self.tokenizer.batch_decode(*_A , **_A) def lowerCAmelCase__ ( self , *_A , **_A): return self.tokenizer.decode(*_A , **_A) @property def lowerCAmelCase__ ( self): return ["input_ids", "attention_mask", "pixel_values"]
620
import pytest import datasets # Import fixture modules as plugins UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics' SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' ) def _UpperCAmelCase ( ): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _UpperCAmelCase ( 
_SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
620
1
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() # edges = list of graph's edges SCREAMING_SNAKE_CASE_ = get_edges(_SCREAMING_SNAKE_CASE ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edges.pop() chosen_vertices.add(_SCREAMING_SNAKE_CASE ) chosen_vertices.add(_SCREAMING_SNAKE_CASE ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(_SCREAMING_SNAKE_CASE ) return chosen_vertices def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
620
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
1
import argparse import json import subprocess def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = ( f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" ' https://api.github.com/repos/huggingface/transformers/actions/runners' ) SCREAMING_SNAKE_CASE_ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE ) SCREAMING_SNAKE_CASE_ = output.stdout.decode('utf-8' ) SCREAMING_SNAKE_CASE_ = json.loads(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = status['runners'] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_SCREAMING_SNAKE_CASE ) # save the result so we can report them on Slack with open('offline_runners.txt' , 'w' ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) > 0: SCREAMING_SNAKE_CASE_ = '\n'.join([x['name'] for x in offline_runners] ) raise ValueError(f"""The following runners are offline:\n{failed}""" ) if __name__ == "__main__": def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" return values.split(',' ) UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--target_runners", default=None, type=list_str, required=True, help="Comma-separated list of runners to check status.", ) parser.add_argument( "--token", default=None, type=str, required=True, help="A token that has actions:read permission." ) UpperCamelCase__ : Optional[int] = parser.parse_args() get_runner_status(args.target_runners, args.token)
620
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Any = 'biogpt' def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ): SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = scale_embedding SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = layerdrop SCREAMING_SNAKE_CASE_ = activation_dropout super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
620
1
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification UpperCamelCase__ : Tuple = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co UpperCamelCase__ : str = "main" # Default branch name UpperCamelCase__ : str = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2" # One particular commit (not the top of `main`) UpperCamelCase__ : Any = "aaaaaaa" # This commit does not exist, so we should 404. UpperCamelCase__ : Any = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684" # Sha-1 of config.json on the top of `main`, for checking purposes UpperCamelCase__ : int = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3" @contextlib.contextmanager def _UpperCAmelCase ( ): """simple docstring""" print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def _UpperCAmelCase ( ): """simple docstring""" print('Bonjour!' ) yield print('Au revoir!' ) class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): # If the spec is missing, importlib would not be able to import the module dynamically. 
assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers') is not None class __snake_case ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def lowerCAmelCase__ ( self , _A): with ContextManagers([]): print('Transformers are awesome!') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def lowerCAmelCase__ ( self , _A): with ContextManagers([context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def lowerCAmelCase__ ( self , _A): with ContextManagers([context_fr(), context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n') @require_torch def lowerCAmelCase__ ( self): self.assertEqual(find_labels(_A) , ['labels']) self.assertEqual(find_labels(_A) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(_A) , ['start_positions', 'end_positions']) class __snake_case ( lowerCAmelCase__ ): pass self.assertEqual(find_labels(_A) , ['labels']) @require_tf def lowerCAmelCase__ ( self): self.assertEqual(find_labels(_A) , ['labels']) self.assertEqual(find_labels(_A) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(_A) , ['start_positions', 'end_positions']) class __snake_case ( lowerCAmelCase__ ): pass self.assertEqual(find_labels(_A) , ['labels']) @require_flax def lowerCAmelCase__ ( self): # Flax models don't have labels self.assertEqual(find_labels(_A) , []) self.assertEqual(find_labels(_A) , []) self.assertEqual(find_labels(_A) , []) class 
__snake_case ( lowerCAmelCase__ ): pass self.assertEqual(find_labels(_A) , [])
620
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __snake_case(DiffusionPipeline):
    """Class-conditional image generation pipeline built around a diffusion
    transformer, a VAE decoder and a Karras-style scheduler.

    NOTE(review): the original block declared every parameter as ``_A`` —
    duplicate argument names are a SyntaxError — and both methods shared the
    name ``lowerCAmelCase__``; names below are restored from the internal
    references (``self.labels``, ``id2label`` usage, etc.).
    """

    def __init__(self, transformer, vae, scheduler, idalabel=None):
        """
        :param transformer: denoising transformer producing noise predictions
        :param vae: autoencoder whose decoder maps latents to images
        :param scheduler: diffusion scheduler driving the sampling loop
        :param idalabel: optional id -> comma-separated-labels mapping used to
            build a human-label -> class-id lookup table
        """
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(','):
                    # normalise each alias so lookups are case/space insensitive
                    self.labels[label.lstrip().lower()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label):
        """Map a label string (or list of strings) to class ids.

        Raises ``ValueError`` for any label absent from ``self.labels``.
        """
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."""
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels,
        guidance_scale=4.0,
        generator=None,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
    ):
        """Run the sampling loop and decode latents into images.

        Classifier-free guidance is applied whenever ``guidance_scale > 1``:
        the batch is doubled with the "null" class id 1000 for the
        unconditional half.
        """
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        # 1000 is the "null" (unconditional) class id used for guidance
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # both halves share the same latents; re-duplicate the first half
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float):
                    dtype = torch.floataa if is_mps else torch.floataa
                else:
                    dtype = torch.intaa if is_mps else torch.intaa
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma: drop the predicted-variance channels before stepping
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
620
1
# Usage:
#  ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate and write a README.md model card for one allenai wmt16 model.

    :param model_card_dir: directory (``Path``) the README.md is written into
    :param src_lang: source language code (e.g. ``"en"``)
    :param tgt_lang: target language code (e.g. ``"de"``)
    :param model_name: one of the keys of ``scores`` below

    NOTE(review): the original block named every parameter
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError: duplicate argument names) and the
    function itself ``_UpperCAmelCase`` while the call site below uses
    ``write_model_card`` — names restored from the call site and the f-string
    references (``src_lang``, ``texts``, ``scores``, ``pair``).
    """
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f"""{src_lang}-{tgt_lang}"""

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.

For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).

All 3 models are available:

* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias


## Training data

Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).

## Eval results

Here are the BLEU scores:

model   |  fairseq  |  transformers
-------|---------|----------
{model_name}  |  {scores[model_name][0]}  |  {scores[model_name][1]}

The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

## Data Sources

- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)


### BibTeX entry and citation info

```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```

"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"""Generating {path}""")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)


# make sure we are under the root of the project
UpperCamelCase__: Optional[int] = Path(__file__).resolve().parent.parent.parent
UpperCamelCase__: Tuple = UpperCamelCase__ / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    UpperCamelCase__ = UpperCamelCase__  # keep module-level naming scheme
    model_card_dir = (Path(__file__).resolve().parent.parent.parent / "model_cards") / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
620
import pickle import numpy as np from matplotlib import pyplot as plt class __snake_case : def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2): SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = conva_get[:2] SCREAMING_SNAKE_CASE_ = conva_get[2] SCREAMING_SNAKE_CASE_ = size_pa SCREAMING_SNAKE_CASE_ = rate_w SCREAMING_SNAKE_CASE_ = rate_t SCREAMING_SNAKE_CASE_ = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 def lowerCAmelCase__ ( self , _A): # save model dict with pickle SCREAMING_SNAKE_CASE_ = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(_A , 'wb') as f: pickle.dump(_A , _A) print(f"""Model saved: {save_path}""") @classmethod def lowerCAmelCase__ ( cls , _A): # read saved model with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = pickle.load(_A) # noqa: S301 SCREAMING_SNAKE_CASE_ = model_dic.get('conv1') conv_get.append(model_dic.get('step_conv1')) SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre') # 
create model instance SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A) # modify model parameter SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('wkj') SCREAMING_SNAKE_CASE_ = model_dic.get('vji') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3') return conv_ins def lowerCAmelCase__ ( self , _A): return 1 / (1 + np.exp(-1 * x)) def lowerCAmelCase__ ( self , _A): return round(_A , 3) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): # convolution process SCREAMING_SNAKE_CASE_ = convs[0] SCREAMING_SNAKE_CASE_ = convs[1] SCREAMING_SNAKE_CASE_ = np.shape(_A)[0] # get the data slice of original image data, data_focus SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , size_data - size_conv + 1 , _A): for j_focus in range(0 , size_data - size_conv + 1 , _A): SCREAMING_SNAKE_CASE_ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_A) # calculate the feature map of every single kernel, and saved as list of matrix SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1) for i_map in range(_A): SCREAMING_SNAKE_CASE_ = [] for i_focus in range(len(_A)): SCREAMING_SNAKE_CASE_ = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape( _A , _A) data_featuremap.append(_A) # expanding the data slice to One dimenssion SCREAMING_SNAKE_CASE_ = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_A)) SCREAMING_SNAKE_CASE_ = np.asarray(_A) return focus_list, data_featuremap def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"): # pooling process SCREAMING_SNAKE_CASE_ = len(featuremaps[0]) SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling) SCREAMING_SNAKE_CASE_ = [] for i_map in range(len(_A)): SCREAMING_SNAKE_CASE_ = 
featuremaps[i_map] SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , _A , _A): for j_focus in range(0 , _A , _A): SCREAMING_SNAKE_CASE_ = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_A)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A) featuremap_pooled.append(_A) return featuremap_pooled def lowerCAmelCase__ ( self , _A): # expanding three dimension data to one dimension list SCREAMING_SNAKE_CASE_ = [] for i in range(len(_A)): SCREAMING_SNAKE_CASE_ = np.shape(data[i]) SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1]) SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0] data_expanded.extend(_A) SCREAMING_SNAKE_CASE_ = np.asarray(_A) return data_expanded def lowerCAmelCase__ ( self , _A): # expanding matrix to one dimension list SCREAMING_SNAKE_CASE_ = np.asarray(_A) SCREAMING_SNAKE_CASE_ = np.shape(_A) SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 for i_map in range(_A): SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map)) for i in range(0 , _A , _A): for j in range(0 , _A , _A): SCREAMING_SNAKE_CASE_ = pd_pool[ i_pool ] SCREAMING_SNAKE_CASE_ = i_pool + 1 SCREAMING_SNAKE_CASE_ = np.multiply( _A , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(_A) return pd_all def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool): # model traning print('----------------------Start Training-------------------------') print((' - - Shape: Train_Data ', np.shape(_A))) print((' - - Shape: Teach_Data ', np.shape(_A))) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 10000 while rp < n_repeat and mse >= error_accuracy: SCREAMING_SNAKE_CASE_ = 0 
print(f"""-------------Learning Time {rp}--------------""") for p in range(len(_A)): # print('------------Learning Image: %d--------------'%p) SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p]) SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p]) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) SCREAMING_SNAKE_CASE_ = np.shape(_A) SCREAMING_SNAKE_CASE_ = self._expand(_A) SCREAMING_SNAKE_CASE_ = data_bp_input SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- SCREAMING_SNAKE_CASE_ = np.multiply( (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa))) SCREAMING_SNAKE_CASE_ = np.multiply( np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa))) SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji) SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga) SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist() SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool( _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv]) SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A) SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) SCREAMING_SNAKE_CASE_ = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE_ = 
self.thre_bpa - pd_k_all * self.rate_thre SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) SCREAMING_SNAKE_CASE_ = rp + 1 SCREAMING_SNAKE_CASE_ = error_count / patterns all_mse.append(_A) def draw_error(): SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(_A , '+-') plt.plot(_A , 'r--') plt.xlabel('Learning Times') plt.ylabel('All_mse') plt.grid(_A , alpha=0.5) plt.show() print('------------------Training Complished---------------------') print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""")) if draw_e: draw_error() return mse def lowerCAmelCase__ ( self , _A): # model predict SCREAMING_SNAKE_CASE_ = [] print('-------------------Start Testing-------------------------') print((' - - Shape: Test_Data ', np.shape(_A))) for p in range(len(_A)): SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p]) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) SCREAMING_SNAKE_CASE_ = self._expand(_A) SCREAMING_SNAKE_CASE_ = data_bp_input SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) produce_out.extend(bp_outa.getA().tolist()) SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out] return np.asarray(_A) def lowerCAmelCase__ ( self , _A): # return the data of image after convoluting process so we can check it out SCREAMING_SNAKE_CASE_ = np.asmatrix(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) 
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
620
1
from __future__ import annotations def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE ) create_state_space_tree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return result def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , ): """simple docstring""" if sum(_SCREAMING_SNAKE_CASE ) > max_sum or (remaining_nums_sum + sum(_SCREAMING_SNAKE_CASE )) < max_sum: return if sum(_SCREAMING_SNAKE_CASE ) == max_sum: result.append(_SCREAMING_SNAKE_CASE ) return for index in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ): create_state_space_tree( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 , [*path, nums[index]] , _SCREAMING_SNAKE_CASE , remaining_nums_sum - nums[index] , ) UpperCamelCase__ : List[str] = [3, 34, 4, 12, 5, 2] UpperCamelCase__ : List[Any] = 9 UpperCamelCase__ : Union[str, Any] = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
620
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the most recent scheduled runs of the daily-CI workflow on `main`.

    NOTE(review): the original block named all four functions
    ``_UpperCAmelCase`` and declared duplicate parameters (a SyntaxError);
    names are restored from the internal call sites.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the latest *completed* daily-CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the latest completed daily-CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword expected by get_artifacts_links
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the latest daily-CI artifacts and return their text contents.

    Returns a dict: artifact name -> {member filename -> decoded contents}.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
620
1
import pickle import numpy as np from matplotlib import pyplot as plt class __snake_case : def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2): SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = bp_numa SCREAMING_SNAKE_CASE_ = conva_get[:2] SCREAMING_SNAKE_CASE_ = conva_get[2] SCREAMING_SNAKE_CASE_ = size_pa SCREAMING_SNAKE_CASE_ = rate_w SCREAMING_SNAKE_CASE_ = rate_t SCREAMING_SNAKE_CASE_ = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1 def lowerCAmelCase__ ( self , _A): # save model dict with pickle SCREAMING_SNAKE_CASE_ = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(_A , 'wb') as f: pickle.dump(_A , _A) print(f"""Model saved: {save_path}""") @classmethod def lowerCAmelCase__ ( cls , _A): # read saved model with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = pickle.load(_A) # noqa: S301 SCREAMING_SNAKE_CASE_ = model_dic.get('conv1') conv_get.append(model_dic.get('step_conv1')) SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight') SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre') # 
create model instance SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A) # modify model parameter SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('wkj') SCREAMING_SNAKE_CASE_ = model_dic.get('vji') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2') SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3') return conv_ins def lowerCAmelCase__ ( self , _A): return 1 / (1 + np.exp(-1 * x)) def lowerCAmelCase__ ( self , _A): return round(_A , 3) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): # convolution process SCREAMING_SNAKE_CASE_ = convs[0] SCREAMING_SNAKE_CASE_ = convs[1] SCREAMING_SNAKE_CASE_ = np.shape(_A)[0] # get the data slice of original image data, data_focus SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , size_data - size_conv + 1 , _A): for j_focus in range(0 , size_data - size_conv + 1 , _A): SCREAMING_SNAKE_CASE_ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_A) # calculate the feature map of every single kernel, and saved as list of matrix SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1) for i_map in range(_A): SCREAMING_SNAKE_CASE_ = [] for i_focus in range(len(_A)): SCREAMING_SNAKE_CASE_ = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape( _A , _A) data_featuremap.append(_A) # expanding the data slice to One dimenssion SCREAMING_SNAKE_CASE_ = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_A)) SCREAMING_SNAKE_CASE_ = np.asarray(_A) return focus_list, data_featuremap def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"): # pooling process SCREAMING_SNAKE_CASE_ = len(featuremaps[0]) SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling) SCREAMING_SNAKE_CASE_ = [] for i_map in range(len(_A)): SCREAMING_SNAKE_CASE_ = 
featuremaps[i_map] SCREAMING_SNAKE_CASE_ = [] for i_focus in range(0 , _A , _A): for j_focus in range(0 , _A , _A): SCREAMING_SNAKE_CASE_ = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_A)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_A)) SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A) featuremap_pooled.append(_A) return featuremap_pooled def lowerCAmelCase__ ( self , _A): # expanding three dimension data to one dimension list SCREAMING_SNAKE_CASE_ = [] for i in range(len(_A)): SCREAMING_SNAKE_CASE_ = np.shape(data[i]) SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1]) SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0] data_expanded.extend(_A) SCREAMING_SNAKE_CASE_ = np.asarray(_A) return data_expanded def lowerCAmelCase__ ( self , _A): # expanding matrix to one dimension list SCREAMING_SNAKE_CASE_ = np.asarray(_A) SCREAMING_SNAKE_CASE_ = np.shape(_A) SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 for i_map in range(_A): SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map)) for i in range(0 , _A , _A): for j in range(0 , _A , _A): SCREAMING_SNAKE_CASE_ = pd_pool[ i_pool ] SCREAMING_SNAKE_CASE_ = i_pool + 1 SCREAMING_SNAKE_CASE_ = np.multiply( _A , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(_A) return pd_all def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool): # model traning print('----------------------Start Training-------------------------') print((' - - Shape: Train_Data ', np.shape(_A))) print((' - - Shape: Teach_Data ', np.shape(_A))) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 10000 while rp < n_repeat and mse >= error_accuracy: SCREAMING_SNAKE_CASE_ = 0 
print(f"""-------------Learning Time {rp}--------------""") for p in range(len(_A)): # print('------------Learning Image: %d--------------'%p) SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p]) SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p]) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) SCREAMING_SNAKE_CASE_ = np.shape(_A) SCREAMING_SNAKE_CASE_ = self._expand(_A) SCREAMING_SNAKE_CASE_ = data_bp_input SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- SCREAMING_SNAKE_CASE_ = np.multiply( (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa))) SCREAMING_SNAKE_CASE_ = np.multiply( np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa))) SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji) SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga) SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist() SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool( _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv]) SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A) SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) SCREAMING_SNAKE_CASE_ = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE_ = 
self.thre_bpa - pd_k_all * self.rate_thre SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) SCREAMING_SNAKE_CASE_ = rp + 1 SCREAMING_SNAKE_CASE_ = error_count / patterns all_mse.append(_A) def draw_error(): SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(_A , '+-') plt.plot(_A , 'r--') plt.xlabel('Learning Times') plt.ylabel('All_mse') plt.grid(_A , alpha=0.5) plt.show() print('------------------Training Complished---------------------') print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""")) if draw_e: draw_error() return mse def lowerCAmelCase__ ( self , _A): # model predict SCREAMING_SNAKE_CASE_ = [] print('-------------------Start Testing-------------------------') print((' - - Shape: Test_Data ', np.shape(_A))) for p in range(len(_A)): SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p]) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) SCREAMING_SNAKE_CASE_ = self._expand(_A) SCREAMING_SNAKE_CASE_ = data_bp_input SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa SCREAMING_SNAKE_CASE_ = self.sig(_A) produce_out.extend(bp_outa.getA().tolist()) SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out] return np.asarray(_A) def lowerCAmelCase__ ( self , _A): # return the data of image after convoluting process so we can check it out SCREAMING_SNAKE_CASE_ = np.asmatrix(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) 
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
620
# Lazy-import structure for the MVP model.
# Fix: the import structure dict was bound to a mangled name while
# `_LazyModule(...)` referenced the undefined `_import_structure`, and the
# optional (tokenizers / torch) entries were never added to the structure.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
class __snake_case:  # Public class to implement a graph
    """Count connected islands of 1s in a 2D grid using 8-connectivity DFS.

    Fixes: the mangled original declared every ``__init__`` parameter as the
    same name ``_A`` (a SyntaxError), never stored ``row``/``col``/``graph``
    on ``self`` even though the other methods read ``self.ROW``/``self.COL``/
    ``self.graph``, and gave all three methods the same colliding name while
    the bodies call ``self.is_safe`` and ``self.diffs``.
    """

    def __init__(self, row, col, graph):
        self.ROW = row  # number of rows in the grid
        self.COL = col  # number of columns in the grid
        self.graph = graph  # 2D list of 0/1 cells

    def is_safe(self, i, j, visited):
        # A cell may be visited if it lies inside the grid, is land (1),
        # and has not been visited yet.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # DFS over all 8 neighbours (diagonals included) of cell (i, j).
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):
        # And finally, count all islands: each unvisited land cell seeds a DFS.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
620
# Tests for the `accelerate launch` / `accelerate tpu-config` CLI.
# Fixes: every test method carried the same mangled non-`test_` name, so
# unittest would never discover (or even define more than one of) them;
# class attributes were bound to mangled names while the bodies read
# `self.base_cmd`, `cls.config_path`, etc.; `return_stdout=_A` was undefined
# (restored to True — the stdout is asserted on immediately after).
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """Smoke tests launching `accelerate launch` against every shipped config."""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Move any user default config out of the way so tests run clean.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's original default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_file(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """Checks the gcloud command line built by `accelerate tpu-config --debug`."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )
620
1
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if ``item`` is present in the sorted list ``a_list``.

    Fix: the mangled original defined the function under another name while
    both its own recursion and the ``__main__`` driver call ``binary_search``
    (NameError). Also rewritten iteratively with lo/hi indices — the
    recursive version copied an O(n) slice at every level.
    """
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1  # search the left half
        else:
            low = midpoint + 1  # search the right half
    return False


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
620
# Lazy-import structure for the TrOCR model.
# Fix: the import structure dict was bound to a mangled name while
# `_LazyModule(...)` referenced the undefined `_import_structure`, and the
# torch-only modeling entries were never added to the structure.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCamelCase__ : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCamelCase__ : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Vector , _SCREAMING_SNAKE_CASE : Vector ): """simple docstring""" return np.sqrt(np.sum((np.asarray(_SCREAMING_SNAKE_CASE ) - np.asarray(_SCREAMING_SNAKE_CASE )) ** 2 ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Vector , _SCREAMING_SNAKE_CASE : Vector ): """simple docstring""" return sum((va - va) ** 2 for va, va in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) ** (1 / 2) if __name__ == "__main__": def _UpperCAmelCase ( ): """simple docstring""" from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) benchmark()
620
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, phases=10):
    """Worker holding one list element for parallel odd-even transposition sort.

    Each phase it either exchanges with its right neighbor (keeping the min)
    or its left neighbor (keeping the max), depending on phase parity.
    Fixes vs. the mangled original: the hard-coded 10 phases is now the
    ``phases`` parameter (default 10 preserves old behavior) so lists of any
    length sort correctly, and the worker has a callable name so it can be
    used as a ``Process`` target.
    """
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, phases):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` in ascending order using one process per element.

    Fixes: restored the function/worker names (``main`` below calls this name
    and the ``Process`` target was an undefined mangled identifier), guarded
    the empty and single-element cases (the original indexed ``arr[0]`` and
    spawned a duplicate process), and run ``len(arr)`` phases so any length
    sorts fully.
    """
    n = len(arr)
    if n <= 1:  # nothing to exchange
        return arr

    result_pipe = []
    process_array_ = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # the first and last process only have one neighbor so they are made
    # outside of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], n),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, n - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], n),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(n - 1, arr[n - 1], temp_ls, None, temp_lr, None, result_pipe[n - 1], n),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, n):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort 10..1 and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
620
1
import qiskit


def quantum_entanglement(qubits: int = 2):
    """Build, simulate and measure a maximally entangled (GHZ) state.

    Fixes vs. the mangled original: the ``__main__`` driver calls
    ``quantum_entanglement`` which was undefined, and the CNOT target was the
    parameter ``qubits`` — an index one past the last qubit, which errors —
    instead of the chain target ``i``.

    Returns the measurement counts dict from 1000 simulator shots.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate chained down the register
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
620
# Tests for the text-question-answering tool.
# Fixes: all four test methods shared one mangled non-`test_` name (unittest
# would define only the last and run none); the shared fixture text was bound
# to a mangled name while the tests pass `TEXT`; `remote=_A` restored to
# `remote=True` for the remote tool (presumably — TODO confirm against the
# upstream test).
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
620
1
# Pix2Struct configuration classes.
# Fixes vs. the mangled original: every attribute assignment was bound to a
# throwaway local instead of `self` (so `to_dict` and the composite config's
# reads of `self.text_config.*` would fail), the module logger and the
# sub-config class names referenced below were undefined, and the
# `from_pretrained` / `to_dict` / `from_text_vision_configs` methods all
# collided on one mangled name.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite Pix2Struct configuration holding text and vision sub-configs.

    NOTE(review): the composite class's public name was unrecoverable from the
    mangled source; named to match the referenced sub-config classes — verify
    against importers of this module.
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # mirror the decoder's special token ids on the composite config
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # keep the sub-configs' init ranges in sync with the composite config
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
620
# Tests for the BEiT image processor.
# Fixes vs. the mangled original: the tester's __init__ bound every value to
# a throwaway local while `prepare_image_processor_dict` reads the `self.*`
# attributes; `BeitImageProcessingTester`, `prepare_semantic_single_inputs`
# and `prepare_semantic_batch_inputs` are called by name below but were
# defined under mangled names; the test methods lacked the `test_` prefix so
# unittest would never run them; and boolean arguments (`equal_resolution`,
# `numpify`, `torchify`, `reduce_labels`) were the undefined `_A`.
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    imagea = Image.open(ds[0]["file"])
    mapa = Image.open(ds[1]["file"])
    imageb = Image.open(ds[2]["file"])
    mapb = Image.open(ds[3]["file"])

    return [imagea, imageb], [mapa, mapb]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels
        # should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
620
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=32 , _A=True , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size_divisor SCREAMING_SNAKE_CASE_ = do_rescale def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = GLPNImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = GLPNImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size_divisor')) self.assertTrue(hasattr(_A , 'resample')) self.assertTrue(hasattr(_A , 'do_rescale')) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A 
, Image.Image) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
620
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
620
1
def _UpperCAmelCase ( ): """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= n: SCREAMING_SNAKE_CASE_ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _UpperCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 500 ) if __name__ == "__main__": print(solution())
620
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index == number_of_items: return 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 ) if weights[index] <= max_weight: SCREAMING_SNAKE_CASE_ = values[index] + knapsack( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
620
1
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP UpperCamelCase__ : Tuple = False try: UpperCamelCase__ : Tuple = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class __snake_case : def __init__( self , _A = None , _A = []): SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = choices SCREAMING_SNAKE_CASE_ = prompt if sys.platform == "win32": SCREAMING_SNAKE_CASE_ = '*' else: SCREAMING_SNAKE_CASE_ = '➔ ' def lowerCAmelCase__ ( self , _A , _A = ""): if sys.platform != "win32": writeColor(self.choices[index] , 32 , _A) else: forceWrite(self.choices[index] , _A) def lowerCAmelCase__ ( self , _A): if index == self.position: forceWrite(f""" {self.arrow_char} """) self.write_choice(_A) else: forceWrite(f""" {self.choices[index]}""") reset_cursor() def lowerCAmelCase__ ( self , _A , _A = 1): SCREAMING_SNAKE_CASE_ = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(_A) move_cursor(_A , direction.name) self.print_choice(self.position) @input.mark(KEYMAP['up']) def lowerCAmelCase__ ( self): self.move_direction(Direction.UP) @input.mark(KEYMAP['down']) def lowerCAmelCase__ ( self): self.move_direction(Direction.DOWN) @input.mark(KEYMAP['newline']) def lowerCAmelCase__ ( self): move_cursor(len(self.choices) - self.position , 'DOWN') return self.position @input.mark(KEYMAP['interrupt']) def lowerCAmelCase__ ( self): move_cursor(len(self.choices) - self.position , 'DOWN') raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(_A)] for number in range(10)]) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = int(chr(self.current_selection)) SCREAMING_SNAKE_CASE_ = index - 
self.position if index == self.position: return if index < len(self.choices): if self.position > index: self.move_direction(Direction.UP , -movement) elif self.position < index: self.move_direction(Direction.DOWN , _A) else: return else: return def lowerCAmelCase__ ( self , _A = 0): if self.prompt: linebreak() forceWrite(self.prompt , '\n') if in_colab: forceWrite('Please input a choice index (starting from 0), and press enter' , '\n') else: forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n') SCREAMING_SNAKE_CASE_ = default_choice for i in range(len(self.choices)): self.print_choice(_A) forceWrite('\n') move_cursor(len(self.choices) - self.position , 'UP') with cursor.hide(): while True: if in_colab: try: SCREAMING_SNAKE_CASE_ = int(builtins.input()) except ValueError: SCREAMING_SNAKE_CASE_ = default_choice else: SCREAMING_SNAKE_CASE_ = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices) + 1): move_cursor(1 , 'UP') clear_line() self.write_choice(_A , '\n') return choice
620
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
1
import random def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None SCREAMING_SNAKE_CASE_ = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )] SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
620
def _UpperCAmelCase ( ): """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= n: SCREAMING_SNAKE_CASE_ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _UpperCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 500 ) if __name__ == "__main__": print(solution())
620
1
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCamelCase__ : List[str] = {"UserAgent": UserAgent().random} def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = script.contents[0] SCREAMING_SNAKE_CASE_ = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __snake_case : def __init__( self , _A): SCREAMING_SNAKE_CASE_ = f"""https://www.instagram.com/{username}/""" SCREAMING_SNAKE_CASE_ = self.get_json() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = requests.get(self.url , headers=_A).text SCREAMING_SNAKE_CASE_ = BeautifulSoup(_A , 'html.parser').find_all('script') try: return extract_user_profile(scripts[4]) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3]) def __repr__( self): return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self): return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def lowerCAmelCase__ ( self): return self.user_data["username"] @property def lowerCAmelCase__ ( self): return self.user_data["full_name"] @property def lowerCAmelCase__ ( self): return self.user_data["biography"] @property def lowerCAmelCase__ ( self): return self.user_data["business_email"] @property def lowerCAmelCase__ ( self): return self.user_data["external_url"] @property def lowerCAmelCase__ ( self): return self.user_data["edge_followed_by"]["count"] @property def lowerCAmelCase__ ( self): return self.user_data["edge_follow"]["count"] @property def lowerCAmelCase__ ( self): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCAmelCase__ ( self): return self.user_data["profile_pic_url_hd"] @property def lowerCAmelCase__ ( self): return self.user_data["is_verified"] @property def lowerCAmelCase__ ( self): return self.user_data["is_private"] def 
_UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str = "github" ): """simple docstring""" import os if os.environ.get('CI' ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE_ = InstagramUser(_SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : List[Any] = InstagramUser("github") print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
620
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
1
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = model.config SCREAMING_SNAKE_CASE_ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE_ = MBartConfig( is_decoder=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , add_cross_attention=_SCREAMING_SNAKE_CASE , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=_SCREAMING_SNAKE_CASE , add_final_layer_norm=_SCREAMING_SNAKE_CASE , ) return encoder_config, decoder_config def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if "encoder.model" in name: SCREAMING_SNAKE_CASE_ = name.replace('encoder.model' , 'encoder' ) if "decoder.model" in name: SCREAMING_SNAKE_CASE_ = name.replace('decoder.model' , 'decoder' ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE_ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE_ = name.replace('patch_embed.norm' , 'embeddings.norm' ) if name.startswith('encoder' ): if "layers" in name: SCREAMING_SNAKE_CASE_ = 'encoder.' 
+ name if "attn.proj" in name: SCREAMING_SNAKE_CASE_ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE_ = name.replace('attn' , 'attention.self' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE_ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE_ = name.replace('mlp.fc2' , 'output.dense' ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE_ = 'encoder.layernorm.weight' if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE_ = 'encoder.layernorm.bias' return name def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE_ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: SCREAMING_SNAKE_CASE_ = key.split('.' 
) SCREAMING_SNAKE_CASE_ = int(key_split[3] ) SCREAMING_SNAKE_CASE_ = int(key_split[5] ) SCREAMING_SNAKE_CASE_ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE_ = val[:dim, :] SCREAMING_SNAKE_CASE_ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE_ = val[-dim:, :] else: SCREAMING_SNAKE_CASE_ = val[:dim] SCREAMING_SNAKE_CASE_ = val[dim : dim * 2] SCREAMING_SNAKE_CASE_ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE_ = val return orig_state_dict def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : str=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ = DonutModel.from_pretrained(_SCREAMING_SNAKE_CASE ).eval() # load HuggingFace model SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_configs(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = DonutSwinModel(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = MBartForCausalLM(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = VisionEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE_ = original_model.state_dict() SCREAMING_SNAKE_CASE_ = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify results on scanned document SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/example-documents' ) SCREAMING_SNAKE_CASE_ = dataset['test'][0]['image'].convert('RGB' ) SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE , from_slow=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) SCREAMING_SNAKE_CASE_ = 
DonutProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": SCREAMING_SNAKE_CASE_ = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' SCREAMING_SNAKE_CASE_ = 'When is the coffee break?' SCREAMING_SNAKE_CASE_ = task_prompt.replace('{user_input}' , _SCREAMING_SNAKE_CASE ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": SCREAMING_SNAKE_CASE_ = '<s_rvlcdip>' elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: SCREAMING_SNAKE_CASE_ = '<s_cord>' elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": SCREAMING_SNAKE_CASE_ = 's_cord-v2>' elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": SCREAMING_SNAKE_CASE_ = '<s_zhtrainticket>' elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt SCREAMING_SNAKE_CASE_ = 'hello world' else: raise ValueError('Model name not supported' ) SCREAMING_SNAKE_CASE_ = original_model.decoder.tokenizer(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' )[ 'input_ids' ] SCREAMING_SNAKE_CASE_ = original_model.encoder.model.patch_embed(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model.encoder.embeddings(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) # verify encoder hidden states SCREAMING_SNAKE_CASE_ = original_model.encoder(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = model.encoder(_SCREAMING_SNAKE_CASE ).last_hidden_state assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-2 ) # verify decoder hidden states SCREAMING_SNAKE_CASE_ = original_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).logits SCREAMING_SNAKE_CASE_ = 
model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' ) processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="naver-clova-ix/donut-base-finetuned-docvqa", required=False, type=str, help="Name of the original model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) UpperCamelCase__ : Any = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
620
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds small TrOCR decoder configs/inputs for the standalone decoder tests.

    NOTE(review): parameter names restored from the obfuscated original
    (duplicate `_A` parameters were a SyntaxError); defaults are unchanged.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for a tiny decoder."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1

        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        # NOTE(review): original referenced the tester by this name while the class
        # was (mis)named `__snake_case`, and passed an unbound `_A` for is_training.
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
620
1
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load the vocab file (one comma-separated token group per line) and the emoji json.

    Returns (vocab, raw_vocab, ids_to_tokens, emoji):
      - vocab maps every surface form to its id,
      - raw_vocab maps the joined line to its id,
      - ids_to_tokens maps id -> list of surface forms.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A line that is itself "," (or contains no comma) is a single token; otherwise
    # the line lists several comma-separated variants that share one id.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, delegating to SubWordJapaneseTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and truncating from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Greedy longest-match subword tokenizer with Japanese-specific text normalization."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # NOTE(review): the obfuscated original stored all six patterns under one
        # name, losing five of them; restored as repatter1..6 used by clean_text.
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs/emails/phones/dates/prices with placeholder tokens and collapse box-drawing runs."""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        # Normalize whitespace/line-break/dash variants to placeholder tokens.
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # single char whose UTF-8 encoding is 2 bytes in known symbol ranges
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # single char whose UTF-8 encoding is 3 bytes in the U+2000-U+2BFF range
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # placeholder tokens start with '<'; allow a longer lookahead for them
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    # fall back to raw UTF-8 byte tokens
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
620
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`.

    Attributes:
        latents: pre-quantization encoder output, shape follows the encoder.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE model: Encoder -> quant conv -> VectorQuantizer -> post-quant conv -> Decoder.

    NOTE(review): reconstructed from an obfuscated original that assigned submodules
    to a local variable instead of `self.*` and used the nonexistent `nn.Convad`
    (should be `nn.Conv2d`); interface follows the internal references
    (`self.encoder`, `self.quantize`, `self.config.norm_type`, `VQEncoderOutput`).
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode a batch of images to (unquantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents to images, quantizing first unless `force_not_quantize`."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # spatial norm decoders condition on the quantized latents
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode then decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
620
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase__ : Dict = { "configuration_rag": ["RagConfig"], "retrieval_rag": ["RagRetriever"], "tokenization_rag": ["RagTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = [ "RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[Any] = [ "TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

# NOTE(review): the obfuscated original imported `is_abit_bnb_available` twice;
# the 4-bit and 8-bit checks are distinct functions in accelerate.utils.imports.
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize `model` to 4/8-bit with bitsandbytes and dispatch it onto devices.

    Works either on an already-materialized model (quantize in place, move to cuda)
    or on a meta-device model (replace layers, then load weights from
    `weights_location` with offloading support).
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """Compute (or validate) a device map for the quantized model."""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        # skipped modules keep the checkpoint dtype; fp32 modules stay in fp32
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n                    Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                    the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                    these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                    `load_and_quantize_model`. Check\n                    https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                    for more details.\n                    "
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every eligible `nn.Linear` with a bitsandbytes 4/8-bit linear layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    """Recursive worker for `replace_with_bnb_layers`; returns (model, has_been_replaced)."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    """Return module names (tied weights + output head) that should stay unquantized."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Check if we have bnb 4-bit modules in the model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload(module, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize a parameter on device 0, offload it (and its SCB stats) to disk, then leave a meta tensor."""
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(module, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(module, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
620
1
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __snake_case ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ): def __init__( self , _A=None , **_A): super().__init__(features=_A) SCREAMING_SNAKE_CASE_ = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCAmelCase__ ( self , _A): import torch if isinstance(_A , _A) and column: if all( isinstance(_A , torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return torch.stack(_A) return column def lowerCAmelCase__ ( self , _A): import torch if isinstance(_A , (str, bytes, type(_A))): return value elif isinstance(_A , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() SCREAMING_SNAKE_CASE_ = {} if isinstance(_A , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): SCREAMING_SNAKE_CASE_ = {'dtype': torch.intaa} elif isinstance(_A , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): SCREAMING_SNAKE_CASE_ = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(_A , PIL.Image.Image): SCREAMING_SNAKE_CASE_ = np.asarray(_A) return torch.tensor(_A , **{**default_dtype, **self.torch_tensor_kwargs}) def lowerCAmelCase__ ( self , _A): import torch # support for torch, tf, jax etc. 
if hasattr(_A , '__array__') and not isinstance(_A , torch.Tensor): SCREAMING_SNAKE_CASE_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(_A , np.ndarray): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(_A) for substruct in data_struct]) elif isinstance(_A , (list, tuple)): return self._consolidate([self.recursive_tensorize(_A) for substruct in data_struct]) return self._tensorize(_A) def lowerCAmelCase__ ( self , _A): return map_nested(self._recursive_tensorize , _A , map_list=_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_row(_A) SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_row(_A) return self.recursive_tensorize(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_column(_A) SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_column(_A , pa_table.column_names[0]) SCREAMING_SNAKE_CASE_ = self.recursive_tensorize(_A) SCREAMING_SNAKE_CASE_ = self._consolidate(_A) return column def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_batch(_A) SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_batch(_A) SCREAMING_SNAKE_CASE_ = self.recursive_tensorize(_A) for column_name in batch: SCREAMING_SNAKE_CASE_ = self._consolidate(batch[column_name]) return batch
620
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } UpperCamelCase__ : str = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ = bs[:] SCREAMING_SNAKE_CASE_ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ = char return pairs class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ = bytes_to_unicode() SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='utf-8') as merges_handle: SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1] SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A)))) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def lowerCAmelCase__ ( self): return len(self.encoder) def lowerCAmelCase__ ( self): return dict(self.encoder , **self.added_tokens_encoder) def lowerCAmelCase__ ( self , _A): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = get_pairs(_A) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf'))) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 while i < len(_A): try: SCREAMING_SNAKE_CASE_ = word.index(_A , _A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) SCREAMING_SNAKE_CASE_ = j if word[i] == first and i < len(_A) - 1 and word[i + 1] == second: 
new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = new_word if len(_A) == 1: break else: SCREAMING_SNAKE_CASE_ = get_pairs(_A) SCREAMING_SNAKE_CASE_ = ' '.join(_A) SCREAMING_SNAKE_CASE_ = word return word def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for token in re.findall(self.pat , _A): SCREAMING_SNAKE_CASE_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' ')) return bpe_tokens def lowerCAmelCase__ ( self , _A): return self.encoder.get(_A , self.encoder.get(self.unk_token)) def lowerCAmelCase__ ( self , _A): return self.decoder.get(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n') SCREAMING_SNAKE_CASE_ = 0 with open(_A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!') SCREAMING_SNAKE_CASE_ = token_index writer.write(' '.join(_A) + 
'\n') index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCAmelCase__ ( self , _A , _A=False , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ = ' ' + text return (text, kwargs)
620
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCamelCase__ : Tuple = logging.get_logger(__name__) UpperCamelCase__ : int = {"vocab_file": "spiece.model"} UpperCamelCase__ : str = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } UpperCamelCase__ : Union[str, Any] = { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[Any] = 1 UpperCamelCase__ : Dict = 2 UpperCamelCase__ : Tuple = 3 UpperCamelCase__ : Optional[int] = 4 class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Tuple = VOCAB_FILES_NAMES __lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Optional[int] = 'left' def __init__( self , _A , _A=False , _A=True , _A=False , _A="<s>" , _A="</s>" , _A="<unk>" , _A="<sep>" , _A="<pad>" , _A="<cls>" , _A="<mask>" , _A=["<eop>", "<eod>"] , _A = None , **_A , ): # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = do_lower_case SCREAMING_SNAKE_CASE_ = remove_space SCREAMING_SNAKE_CASE_ = keep_accents SCREAMING_SNAKE_CASE_ = vocab_file SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_A) @property def lowerCAmelCase__ ( self): return len(self.sp_model) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self): SCREAMING_SNAKE_CASE_ = self.__dict__.copy() SCREAMING_SNAKE_CASE_ = None return state def __setstate__( self , _A): SCREAMING_SNAKE_CASE_ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def lowerCAmelCase__ ( self , _A): if self.remove_space: SCREAMING_SNAKE_CASE_ = ' '.join(inputs.strip().split()) else: SCREAMING_SNAKE_CASE_ = inputs SCREAMING_SNAKE_CASE_ = outputs.replace('``' , '"').replace('\'\'' , '"') if not self.keep_accents: SCREAMING_SNAKE_CASE_ = unicodedata.normalize('NFKD' , _A) SCREAMING_SNAKE_CASE_ = ''.join([c for c in outputs if not unicodedata.combining(_A)]) if self.do_lower_case: SCREAMING_SNAKE_CASE_ = outputs.lower() return outputs def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = self.preprocess_text(_A) SCREAMING_SNAKE_CASE_ = self.sp_model.encode(_A , out_type=_A) SCREAMING_SNAKE_CASE_ = 
[] for piece in pieces: if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): SCREAMING_SNAKE_CASE_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: SCREAMING_SNAKE_CASE_ = cur_pieces[1:] else: SCREAMING_SNAKE_CASE_ = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(_A) else: new_pieces.append(_A) return new_pieces def lowerCAmelCase__ ( self , _A): return self.sp_model.PieceToId(_A) def lowerCAmelCase__ ( self , _A): return self.sp_model.IdToPiece(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A).replace(_A , ' ').strip() return out_string def lowerCAmelCase__ ( self , _A , _A = False , _A = None , _A = True , **_A , ): SCREAMING_SNAKE_CASE_ = kwargs.pop('use_source_tokenizer' , _A) SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(_A , skip_special_tokens=_A) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A)) SCREAMING_SNAKE_CASE_ = [] sub_texts.append(_A) else: current_sub_text.append(_A) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A)) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE_ = self.clean_up_tokenization(_A) return clean_text else: return text def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is not None: return ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1, 1] return ([0] * len(_A)) + [1, 1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if 
os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _A) elif not os.path.isfile(self.vocab_file): with open(_A , 'wb') as fi: SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto() fi.write(_A) return (out_vocab_file,)
620
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'dpr' def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ): super().__init__(pad_token_id=_A , **_A) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = position_embedding_type
620
1
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants UpperCamelCase__ : str = Mapping[str, np.ndarray] UpperCamelCase__ : int = Mapping[str, Any] # Is a nested dict. UpperCamelCase__ : str = 0.01 @dataclasses.dataclass(frozen=lowerCAmelCase__ ) class __snake_case : __lowerCAmelCase : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. __lowerCAmelCase : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. __lowerCAmelCase : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. __lowerCAmelCase : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. __lowerCAmelCase : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions __lowerCAmelCase : Optional[np.ndarray] = None # Optional remark about the protein. 
Included as a comment in output PDB # files __lowerCAmelCase : Optional[str] = None # Templates used to generate this protein (prediction-only) __lowerCAmelCase : Optional[Sequence[str]] = None # Chain corresponding to each parent __lowerCAmelCase : Optional[Sequence[int]] = None def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = r'(\[[A-Z]+\]\n)' SCREAMING_SNAKE_CASE_ = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0] SCREAMING_SNAKE_CASE_ = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] ) SCREAMING_SNAKE_CASE_ = ["N", "CA", "C"] SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None for g in groups: if "[PRIMARY]" == g[0]: SCREAMING_SNAKE_CASE_ = g[1][0].strip() for i in range(len(_SCREAMING_SNAKE_CASE ) ): if seq[i] not in residue_constants.restypes: SCREAMING_SNAKE_CASE_ = 'X' # FIXME: strings are immutable SCREAMING_SNAKE_CASE_ = np.array( [residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: SCREAMING_SNAKE_CASE_ = [] for axis in range(3 ): tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) ) SCREAMING_SNAKE_CASE_ = np.array(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: SCREAMING_SNAKE_CASE_ = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) ) SCREAMING_SNAKE_CASE_ = np.zeros( ( len(_SCREAMING_SNAKE_CASE ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( 
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : int = 0 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = prot.remark if remark is not None: pdb_headers.append(f"""REMARK {remark}""" ) SCREAMING_SNAKE_CASE_ = prot.parents SCREAMING_SNAKE_CASE_ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: SCREAMING_SNAKE_CASE_ = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id] if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0: SCREAMING_SNAKE_CASE_ = ['N/A'] pdb_headers.append(f"""PARENT {' '.join(_SCREAMING_SNAKE_CASE )}""" ) return pdb_headers def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = pdb_str.split('\n' ) SCREAMING_SNAKE_CASE_ = prot.remark if remark is not None: out_pdb_lines.append(f"""REMARK {remark}""" ) SCREAMING_SNAKE_CASE_ = 42 if prot.parents is not None and len(prot.parents ) > 0: SCREAMING_SNAKE_CASE_ = [] if prot.parents_chain_index is not None: SCREAMING_SNAKE_CASE_ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] ) parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): SCREAMING_SNAKE_CASE_ = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['N/A'] ) parents_per_chain.append(_SCREAMING_SNAKE_CASE ) else: parents_per_chain.append(list(prot.parents ) ) else: SCREAMING_SNAKE_CASE_ = [['N/A']] def make_parent_line(_SCREAMING_SNAKE_CASE : Sequence[str] ) -> str: return f"""PARENT {' '.join(_SCREAMING_SNAKE_CASE )}""" 
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) SCREAMING_SNAKE_CASE_ = 0 for i, l in enumerate(_SCREAMING_SNAKE_CASE ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_SCREAMING_SNAKE_CASE ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = parents_per_chain[chain_counter] else: SCREAMING_SNAKE_CASE_ = ['N/A'] out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) ) return "\n".join(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Protein ): """simple docstring""" SCREAMING_SNAKE_CASE_ = residue_constants.restypes + ['X'] def res_atoa(_SCREAMING_SNAKE_CASE : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , 'UNK' ) SCREAMING_SNAKE_CASE_ = residue_constants.atom_types SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = prot.atom_mask SCREAMING_SNAKE_CASE_ = prot.aatype SCREAMING_SNAKE_CASE_ = prot.atom_positions SCREAMING_SNAKE_CASE_ = prot.residue_index.astype(np.intaa ) SCREAMING_SNAKE_CASE_ = prot.b_factors SCREAMING_SNAKE_CASE_ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('Invalid aatypes.' ) SCREAMING_SNAKE_CASE_ = get_pdb_headers(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: pdb_lines.extend(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = aatype.shape[0] SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = string.ascii_uppercase SCREAMING_SNAKE_CASE_ = None # Add all atom sites. 
for i in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue SCREAMING_SNAKE_CASE_ = 'ATOM' SCREAMING_SNAKE_CASE_ = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else f""" {atom_name}""" SCREAMING_SNAKE_CASE_ = '' SCREAMING_SNAKE_CASE_ = '' SCREAMING_SNAKE_CASE_ = 1.00 SCREAMING_SNAKE_CASE_ = atom_name[0] # Protein supports only C, N, O, S, this works. SCREAMING_SNAKE_CASE_ = '' SCREAMING_SNAKE_CASE_ = 'A' if chain_index is not None: SCREAMING_SNAKE_CASE_ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! SCREAMING_SNAKE_CASE_ = ( f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" f"""{res_name_a:>3} {chain_tag:>1}""" f"""{residue_index[i]:>4}{insertion_code:>1} """ f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" f"""{occupancy:>6.2f}{b_factor:>6.2f} """ f"""{element:>2}{charge:>2}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 SCREAMING_SNAKE_CASE_ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = chain_index[i + 1] if should_terminate: # Close the chain. SCREAMING_SNAKE_CASE_ = 'TER' SCREAMING_SNAKE_CASE_ = ( f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. 
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) pdb_lines.append('END' ) pdb_lines.append('' ) return "\n".join(_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Protein ): """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : FeatureDict , _SCREAMING_SNAKE_CASE : ModelOutput , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None , ): """simple docstring""" return Protein( aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
620
import pytest

import datasets


# Fixture modules pytest loads as plugins.
# (Fix: upstream binds this list to the special name `pytest_plugins`; the mangled
# name `UpperCamelCase__` made the list inert and its `Union[str, Any]` annotation
# referenced unimported typing names.)
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """Mark every test that carries neither `integration` nor `unit` as a unit test.

    pytest only invokes this hook under this exact name; the mangled
    `_UpperCAmelCase` definition (with a duplicate parameter name, a SyntaxError)
    was never called.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """Register custom markers so `pytest --strict-markers` accepts them."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every `datasets` cache location into a per-session temp directory."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence progress bars for the whole test session.

    (`autouse=True` and `scope="session"` restored: the mangled source passed the
    undefined name `_SCREAMING_SNAKE_CASE` as the `autouse` value.)
    """
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Never report download counts to the Hub from tests (upstream sets False here)."""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """Silence the sqlalchemy 2.0 migration warning (attribute exists in sqlalchemy>=1.4).

    NOTE(review): the original boolean was mangled away; True matches the upstream
    `datasets` conftest — confirm against the project history.
    """
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
1
import argparse

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch ``state_dict``.

    Fixes two mangling defects: the function was defined as ``_UpperCAmelCase`` while
    the ``__main__`` block calls ``convert_rembert_tf_checkpoint_to_pytorch`` (NameError),
    and all three parameters shared the name ``_SCREAMING_SNAKE_CASE`` (SyntaxError).

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint to convert.
        rembert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to ``torch.save`` the resulting state dict.
    """
    # Initialise the PyTorch model from the JSON config.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from the TensorFlow checkpoint.
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save the pytorch-model.
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
620
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
1
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


UpperCamelCase__ : str = logging.get_logger(__name__)  # pylint: disable=invalid-name


class __snake_case ( lowerCAmelCase__ ):
    # Speech-to-image pipeline: transcribes audio with a Whisper speech model, then runs a
    # Stable-Diffusion-style text-to-image denoising loop on the transcription.
    #
    # NOTE(review): this block is machine-mangled and does not compile as-is.  Every
    # parameter below is named `_A` (duplicate argument names are a SyntaxError), every
    # assignment targets the single name `SCREAMING_SNAKE_CASE_`, and the bodies read
    # names (`safety_checker`, `slice_size`, `prompt`, `height`, `latents`, ...) that the
    # mangled signatures never bind.  The original signatures must be restored from the
    # upstream project before this class can run; code below is left token-identical.
    def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
        super().__init__()
        # Warn loudly when the pipeline is constructed without a safety checker.
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
        self.register_modules(
            speech_model=_A ,
            speech_processor=_A ,
            vae=_A ,
            text_encoder=_A ,
            tokenizer=_A ,
            unet=_A ,
            scheduler=_A ,
            feature_extractor=_A ,
        )

    def lowerCAmelCase__ ( self , _A = "auto"):
        # Enable sliced attention computation to save memory (mangled name; upstream this
        # is `enable_attention_slicing`).  "auto" halves the attention head dim.
        if slice_size == "auto":
            SCREAMING_SNAKE_CASE_ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_A)

    def lowerCAmelCase__ ( self):
        # Disable attention slicing again (upstream passes `None`; `_A` is unbound here —
        # mangling artifact).  Note this redefinition shadows the method above.
        self.enable_attention_slicing(_A)

    @torch.no_grad()
    def __call__( self , _A , _A=16000 , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ):
        # 1) Transcribe the raw audio to a text prompt with the Whisper speech model.
        SCREAMING_SNAKE_CASE_ = self.speech_processor.feature_extractor(
            _A , return_tensors='pt' , sampling_rate=_A).input_features.to(self.device)
        SCREAMING_SNAKE_CASE_ = self.speech_model.generate(_A , max_length=480000)
        SCREAMING_SNAKE_CASE_ = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A)[
            0
        ]
        # 2) Validate inputs (prompt type, divisible-by-8 dims, positive callback_steps).
        if isinstance(_A , _A):
            SCREAMING_SNAKE_CASE_ = 1
        elif isinstance(_A , _A):
            SCREAMING_SNAKE_CASE_ = len(_A)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A)}""")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_A , _A) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(_A)}.""")
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            _A , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
            SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_embeddings.shape
        SCREAMING_SNAKE_CASE_ = text_embeddings.repeat(1 , _A , 1)
        SCREAMING_SNAKE_CASE_ = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            SCREAMING_SNAKE_CASE_ = 42
            if negative_prompt is None:
                SCREAMING_SNAKE_CASE_ = [''] * batch_size
            elif type(_A) is not type(_A):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A)} !="""
                    f""" {type(_A)}.""")
            elif isinstance(_A , _A):
                SCREAMING_SNAKE_CASE_ = [negative_prompt]
            elif batch_size != len(_A):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(_A)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.')
            else:
                SCREAMING_SNAKE_CASE_ = negative_prompt
            SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
            SCREAMING_SNAKE_CASE_ = self.tokenizer(
                _A , padding='max_length' , max_length=_A , truncation=_A , return_tensors='pt' , )
            SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE_ = uncond_embeddings.shape[1]
            SCREAMING_SNAKE_CASE_ = uncond_embeddings.repeat(1 , _A , 1)
            SCREAMING_SNAKE_CASE_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        SCREAMING_SNAKE_CASE_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        SCREAMING_SNAKE_CASE_ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                SCREAMING_SNAKE_CASE_ = torch.randn(_A , generator=_A , device='cpu' , dtype=_A).to(
                    self.device)
            else:
                SCREAMING_SNAKE_CASE_ = torch.randn(_A , generator=_A , device=self.device , dtype=_A)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            SCREAMING_SNAKE_CASE_ = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(_A)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        SCREAMING_SNAKE_CASE_ = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        SCREAMING_SNAKE_CASE_ = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        SCREAMING_SNAKE_CASE_ = {}
        if accepts_eta:
            SCREAMING_SNAKE_CASE_ = eta
        # 3) Classifier-free-guidance denoising loop.
        for i, t in enumerate(self.progress_bar(_A)):
            # expand the latents if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)
            # predict the noise residual
            SCREAMING_SNAKE_CASE_ = self.unet(_A , _A , encoder_hidden_states=_A).sample
            # perform guidance
            if do_classifier_free_guidance:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2)
                SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A , **_A).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_A , _A , _A)
        # 4) Decode the final latents to an image via the VAE.
        SCREAMING_SNAKE_CASE_ = 1 / 0.1_8_2_1_5 * latents
        SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A)
620
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# (Fix: the original annotated these module-level assignments with `List[Any]` /
# `List[str]` without importing `typing`, which raises NameError at import time.)
UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class __snake_case ( lowerCAmelCase__ ):
    """Configuration class for BioGPT models.

    Default values reproduce the ``microsoft/biogpt`` architecture.  Fixes mangling
    defects: every ``__init__`` parameter was named ``_A`` (duplicate argument names
    are a SyntaxError) while the body read never-bound names such as ``vocab_size``.

    NOTE(review): the base-class name ``lowerCAmelCase__`` is also mangled; upstream
    this inherits from ``PretrainedConfig`` (imported above) — confirm before use.
    """

    # Model identifier used by the transformers auto-config machinery.
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; token ids go to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
620
1
import numpy as np


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D max pooling to a square matrix.

    Fixes mangling defects: both functions repeated the parameter name
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError) and the ``__main__`` block called the
    undefined names ``maxpooling``/``avgpooling``.

    Args:
        arr: square input matrix (anything ``np.array`` accepts).
        size: edge length of the pooling window.
        stride: step between successive windows.

    Returns:
        Matrix of per-window maxima.

    Raises:
        ValueError: if the input is not a square matrix.

    >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2).tolist()
    [[6.0, 8.0], [14.0, 16.0]]
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling (truncated to int) to a square matrix.

    Args:
        arr: square input matrix.
        size: edge length of the pooling window.
        stride: step between successive windows.

    Returns:
        Matrix of per-window (int-truncated) averages.

    Raises:
        ValueError: if the input is not a square matrix.

    >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2).tolist()
    [[3.0, 5.0], [11.0, 13.0]]
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix (truncated toward zero)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # PIL is only needed for this demo; import lazily so the module itself has no
    # hard Pillow dependency.
    from PIL import Image

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
620
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __snake_case ( lowerCAmelCase__ ):
    # DiT (Diffusion Transformer) class-conditional image-generation pipeline.
    #
    # NOTE(review): this block is machine-mangled and does not compile as-is.
    # `__init__` repeats the parameter name `_A` (a SyntaxError), every assignment
    # targets the single name `SCREAMING_SNAKE_CASE_`, and the bodies read names
    # (`idalabel`, `label`, `guidance_scale`, `latent_model_input`, ...) that the
    # mangled signatures never bind.  Restore the upstream signatures before use;
    # the code below is left token-identical.
    def __init__( self , _A , _A , _A , _A = None , ):
        super().__init__()
        self.register_modules(transformer=_A , vae=_A , scheduler=_A)
        # create a imagenet -> id dictionary for easier use
        SCREAMING_SNAKE_CASE_ = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                # A single id may carry several comma-separated label aliases.
                for label in value.split(','):
                    SCREAMING_SNAKE_CASE_ = int(_A)
        SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items()))

    def lowerCAmelCase__ ( self , _A):
        # Map human-readable label strings to ImageNet class ids (mangled name;
        # upstream this is `get_label_ids`).  A bare string is promoted to a list.
        if not isinstance(_A , _A):
            SCREAMING_SNAKE_CASE_ = list(_A)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ):
        # Class-conditional sampling with classifier-free guidance: for
        # guidance_scale > 1 the batch is doubled, the second half conditioned on
        # the "null" class id 1000.
        SCREAMING_SNAKE_CASE_ = len(_A)
        SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
        SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels
        SCREAMING_SNAKE_CASE_ = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
        SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1)
        SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device)
        SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(_A)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Keep both halves of the batch on the same latent trajectory.
                SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2]
                SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0)
            SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)
            SCREAMING_SNAKE_CASE_ = t
            if not torch.is_tensor(_A):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps'
                if isinstance(_A , _A):
                    SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
                else:
                    SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
                SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            SCREAMING_SNAKE_CASE_ = self.transformer(
                _A , timestep=_A , class_labels=_A).sample
            # perform guidance
            if guidance_scale > 1:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0)
                SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0)
                SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1)
            else:
                SCREAMING_SNAKE_CASE_ = noise_pred
            # compute previous image: x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample
        if guidance_scale > 1:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0)
        else:
            SCREAMING_SNAKE_CASE_ = latent_model_input
        # Decode latents to pixel space through the VAE.
        SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
        SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample
        SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=_A)
620
1
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class __snake_case ( lowerCAmelCase__ ):
    # Wraps a (streaming) text dataset and yields fixed-length token sequences built by
    # concatenating tokenized examples separated by the tokenizer's BOS token.
    #
    # NOTE(review): this block is machine-mangled and does not compile as-is.
    # `__init__` repeats the parameter name `_A` (a SyntaxError), assignments all
    # target `SCREAMING_SNAKE_CASE_`, and the bodies read names (`tokenizer`,
    # `dataset`, `seq_length`, `args`, `model`, ...) that the mangled signatures
    # never bind.  Restore upstream names before use; code left token-identical.
    def __init__( self , _A , _A , _A=1024 , _A=1024 , _A=3.6):
        SCREAMING_SNAKE_CASE_ = tokenizer
        SCREAMING_SNAKE_CASE_ = tokenizer.bos_token_id
        SCREAMING_SNAKE_CASE_ = dataset
        SCREAMING_SNAKE_CASE_ = seq_length
        # Characters to buffer per chunk: seq_length * chars_per_token * num_of_sequences.
        SCREAMING_SNAKE_CASE_ = seq_length * chars_per_token * num_of_sequences

    def __iter__( self):
        SCREAMING_SNAKE_CASE_ = iter(self.dataset)
        SCREAMING_SNAKE_CASE_ = True
        while more_examples:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], 0
            while True:
                # Fill the character buffer until it covers `input_characters` of text.
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(_A)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    # Dataset exhausted: emit what is buffered, then stop.
                    SCREAMING_SNAKE_CASE_ = False
                    break
            SCREAMING_SNAKE_CASE_ = tokenizer(_A , truncation=_A)['input_ids']
            SCREAMING_SNAKE_CASE_ = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Slice the concatenated stream into full-length sequences; drop the remainder.
            for i in range(0 , len(_A) , self.seq_length):
                SCREAMING_SNAKE_CASE_ = all_token_ids[i : i + self.seq_length]
                if len(_A) == self.seq_length:
                    yield torch.tensor(_A)


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ):
    """Build the streaming evaluation dataloader (upstream name: ``create_dataloader``)."""
    SCREAMING_SNAKE_CASE_ = {'streaming': True}
    SCREAMING_SNAKE_CASE_ = load_dataset(args.dataset_name , split='train' , **_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = ConstantLengthDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , seq_length=args.seq_length )
    SCREAMING_SNAKE_CASE_ = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=args.batch_size )
    return eval_dataloader


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ):
    """Compute mean loss and perplexity over the eval dataloader (upstream name: ``evaluate``)."""
    model.eval()
    SCREAMING_SNAKE_CASE_ = []
    for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(_SCREAMING_SNAKE_CASE ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    SCREAMING_SNAKE_CASE_ = torch.mean(torch.cat(_SCREAMING_SNAKE_CASE ) )
    try:
        SCREAMING_SNAKE_CASE_ = torch.exp(_SCREAMING_SNAKE_CASE )
    except OverflowError:
        # exp(loss) can overflow for very large losses; report infinite perplexity.
        SCREAMING_SNAKE_CASE_ = float('inf' )
    return loss.item(), perplexity.item()


# Setup Accelerator
UpperCamelCase__ : Optional[int] = Accelerator()

# Parse configuration
UpperCamelCase__ : Dict = HfArgumentParser(EvaluationArguments)
UpperCamelCase__ : List[str] = parser.parse_args()
set_seed(args.seed)

# Logging
UpperCamelCase__ : List[Any] = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
UpperCamelCase__ : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
UpperCamelCase__ : Dict = create_dataloader(args)

# Prepare everything with our `accelerator`.
UpperCamelCase__ , UpperCamelCase__ : Any = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
UpperCamelCase__ , UpperCamelCase__ : Tuple = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
620
import pickle

import numpy as np


class CNN:
    """Tiny from-scratch CNN (one conv layer, average/max pooling, two BP layers).

    NOTE(review): every method in the obfuscated original was named
    ``lowerCAmelCase__`` (later defs shadowed earlier ones) and parameters were
    all ``_A`` (a SyntaxError); the distinct names below are restored from the
    internal call sites (``self.sig``, ``self.pooling``, ``CNN(...)``, ...).
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        conv1_get: [kernel_size, number_of_kernels, conv_step]
        size_p1:   pooling window size
        bp_num1/2/3: unit counts of the input / hidden / output BP layers
        rate_w, rate_t: learning rates for weights and thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Kernels and weights start uniformly in (-0.5, 0.5); thresholds in (-1, 1).
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a CNN instance from a pickle written by :meth:`save_model`."""
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    # Alias used by some historical callers of this class.
    ReadModel = read_model

    def sig(self, x):
        """Element-wise logistic sigmoid."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve ``data`` with every kernel; return (flattened patches, feature maps)."""
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self.Expand_Mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Pool every feature map by ``size_pooling`` (average or max)."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three-dimension data to a one-dimension array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-row matrix
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    # Alias kept because `convolute` historically called `Expand_Mat`.
    Expand_Mat = _expand_mat

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample pooled gradients back to feature-map size and apply sigmoid'."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train until ``n_repeat`` epochs or MSE < ``error_accuracy``; return final MSE.

        NOTE(review): ``draw_e=bool`` (always truthy) is preserved from the
        original; pass a real boolean to control plotting.
        """
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                # Forward pass through the two fully-connected layers.
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply((data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # matplotlib is imported lazily so headless training does not need it
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the trained network on ``datas_test`` and return rounded outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


# Backward-compatible alias for the obfuscated class name.
__snake_case = CNN


if __name__ == "__main__":
    pass
620
1
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


# NOTE(review): in the obfuscated original both classes were named
# ``__snake_case`` and every test method ``lowerCAmelCase__``, so later
# definitions shadowed earlier ones and unittest (which discovers ``test_*``
# methods) would run none of them. Distinct names restored below.
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineSlowTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        # Same as above but keeping the default safety checker enabled.
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_plain = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        # NOTE(review): the original sliced the *first* `images` array here,
        # making the comparison below trivially true; fixed to use images_eff.
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_plain).max() < 1e-2
620
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the most recent scheduled daily-CI workflow runs on ``main``.

    ``token`` (optional) is a GitHub token used for authenticated API calls.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily-CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily-CI run into ``output_dir``."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` matches the (misspelled) keyword declared in
        # get_ci_error_statistics.get_artifacts_links — do not "fix" it here.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Fetch the artifacts, then return ``{artifact_name: {member_filename: text}}``."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results


# Keep the last obfuscated binding available for any external reference.
_UpperCAmelCase = get_last_daily_ci_reports
620
1
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Projects CLIP image/text embeddings into the conditioning inputs of the
    unCLIP decoder: an additive time-embedding term and extra context tokens
    prepended to the text-encoder hidden states.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        # NOTE(review): the obfuscated original named every keyword-only
        # parameter ``_A`` (a SyntaxError); names restored from diffusers.
        super().__init__()

        # Learned embedding substituted for the image embedding in the
        # unconditional half of classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return ``(text_encoder_hidden_states, additive_clip_time_embeddings)``."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings


# Backward-compatible alias for the obfuscated class name.
__snake_case = UnCLIPTextProjModel
620
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Any = { "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"], "tokenization_mvp": ["MvpTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", "MvpForSequenceClassification", "MvpModel", "MvpPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for `accelerate launch`: the default config and every config
    under `tests/test_configs` must be able to launch the bundled test script.
    """

    # Path to accelerate's bundled test script, resolved from the installed package.
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Move the user's real default config out of the way for the duration of the tests.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        # Launch without a config file; request multi-GPU when several GPUs are visible.
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        # Every YAML config shipped with the test-suite must be launchable.
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """
    Test case for `accelerate tpu-config`: with `--debug` the CLI prints the
    gcloud command it would run, so each test asserts on that printed output.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )
620
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor( [ [ 8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0 -0.5_6_2_0_0_4_4, 5.2_3_2_2_9_7_5_2, 4.0_3_8_6_3_9_3, -6.8_7_9_8_3_7_8, -0.5_4_7_8_5_8_0_2, -3.2_0_1_2_1_5_3, 2.9_2_7_7_7_1_7_6, 1.8_8_1_7_1_9_5_3, 7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9 8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10 -9.8_5_7_1_1_8_3_6, -5.9_6_2_0_9_2_3_6, -1.1_3_0_3_9_1_6_1, -7.1_1_1_5_2_9_4, -0.8_3_6_9_6_3_3, -5.3_1_8_6_4_0_8, 7.0_6_4_2_7_4_0_7, 0.8_1_3_6_9_3_4_4, -0.8_2_0_2_3_8_1_7, -5.9_1_7_9_7_9_6, 0.5_8_8_1_3_4_4_3, -6.9_9_7_7_8_4_3_8, 4.7_1_5_5_1_1_8_9, -0.1_8_7_7_1_6_3_7, 7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25 9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26 2.1_2_6_6_2_9_4_1, -9.3_2_5_6_2_0_3_8, 2.3_5_6_5_2_5_2_2, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5_8_4_2_5_5_1_8, 4.5_3_1_3_9_2_3_8, -5.5_7_5_1_0_4_6_4, -6.2_8_0_3_0_6_9_9, -7.1_9_5_2_9_5_0_3, -4.0_2_1_2_2_5_5_1, 1.3_9_3_3_7_0_3_7, -6.0_6_7_0_7_0_5_7, 1.5_9_4_8_0_5_1_7, -9.6_4_3_1_1_9, 0.0_3_9_0_7_7_9_9, 0.6_7_2_3_1_7_6_2, -8.8_8_2_0_6_7_2_6, 6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 
13 2.2_8_5_2_0_7_2_3, 4.8_2_7_6_7_5_0_6, 4.3_0_4_2_1_3_6_8, 8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17 5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18 -4.4_7_3_5_7_9_4, 7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20 -2.9_1_0_5_1_6_6_3, 2.6_1_9_4_6_0_7_7, -2.5_6_7_4_7_6_2, -9.4_8_9_5_9_3_0_2, -4.0_2_9_2_2_6_4_5, -1.3_5_4_1_6_9_1_8, 9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27 -5.8_9_4_7_8_5_5_3, 1.8_5_3_7_0_4_6_7, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor( [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above SCREAMING_SNAKE_CASE_ = tf_top_k_top_p_filtering(_A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4) SCREAMING_SNAKE_CASE_ = output[output != -float('inf')] SCREAMING_SNAKE_CASE_ = tf.cast( tf.where(tf.not_equal(_A , tf.constant(-float('inf') , dtype=tf.floataa))) , dtype=tf.intaa , ) tf.debugging.assert_near(_A , _A , rtol=1E-12) tf.debugging.assert_equal(_A , _A) @require_tf class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): __lowerCAmelCase : List[str] = { 'AutoModelForCausalLM': TFAutoModelForCausalLM, 'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq, 'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM, 'AutoModelForVision2Seq': TFAutoModelForVisionaSeq, 'LogitsProcessorList': TFLogitsProcessorList, 'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor, 'create_tensor_fn': tf.convert_to_tensor, 'floats_tensor': floats_tensor, 'return_tensors': 'tf', } @slow def lowerCAmelCase__ ( 
self): # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 2 class __snake_case ( tf.Module ): def __init__( self , _A): super(_A , self).__init__() SCREAMING_SNAKE_CASE_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids'), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask'), ) , jit_compile=_A , ) def lowerCAmelCase__ ( self , _A , _A): SCREAMING_SNAKE_CASE_ = self.model.generate( input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE_ = [[2, 0], [102, 103]] SCREAMING_SNAKE_CASE_ = [[1, 0], [1, 1]] SCREAMING_SNAKE_CASE_ = DummyModel(model=_A) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_A , _A , signatures={'serving_default': dummy_model.serving}) SCREAMING_SNAKE_CASE_ = tf.saved_model.load(_A).signatures['serving_default'] for batch_size in range(1 , len(_A) + 1): SCREAMING_SNAKE_CASE_ = { 'input_ids': tf.constant(dummy_input_ids[:batch_size]), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size]), } SCREAMING_SNAKE_CASE_ = serving_func(**_A)['sequences'] SCREAMING_SNAKE_CASE_ = test_model.generate(**_A , max_new_tokens=_A) tf.debugging.assert_equal(_A , _A) @slow def lowerCAmelCase__ ( self): # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 class __snake_case ( tf.Module ): def __init__( self , _A): super(_A , self).__init__() SCREAMING_SNAKE_CASE_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids'), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask'), ) , jit_compile=_A , ) def lowerCAmelCase__ ( self , _A , _A): 
SCREAMING_SNAKE_CASE_ = self.model.generate( input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE_ = [[2], [102, 103]] SCREAMING_SNAKE_CASE_ = [[1], [1, 1]] SCREAMING_SNAKE_CASE_ = DummyModel(model=_A) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_A , _A , signatures={'serving_default': dummy_model.serving}) SCREAMING_SNAKE_CASE_ = tf.saved_model.load(_A).signatures['serving_default'] for input_row in range(len(_A)): SCREAMING_SNAKE_CASE_ = { 'input_ids': tf.constant([dummy_input_ids[input_row]]), 'attention_mask': tf.constant([dummy_attention_masks[input_row]]), } SCREAMING_SNAKE_CASE_ = serving_func(**_A)['sequences'] SCREAMING_SNAKE_CASE_ = test_model.generate(**_A , max_new_tokens=_A) tf.debugging.assert_equal(_A , _A) @slow @require_tensorflow_text def lowerCAmelCase__ ( self): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=_A) class __snake_case ( tf.keras.layers.Layer ): def __init__( self): super().__init__() SCREAMING_SNAKE_CASE_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_A , 'spiece.model') , 'rb').read()) SCREAMING_SNAKE_CASE_ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5') def lowerCAmelCase__ ( self , _A , *_A , **_A): SCREAMING_SNAKE_CASE_ = self.tokenizer.tokenize(_A) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text.pad_model_inputs( _A , max_seq_length=64 , pad_value=self.model.config.pad_token_id) SCREAMING_SNAKE_CASE_ = self.model.generate(input_ids=_A , attention_mask=_A) return self.tokenizer.detokenize(_A) SCREAMING_SNAKE_CASE_ = CompleteSentenceTransformer() SCREAMING_SNAKE_CASE_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs') SCREAMING_SNAKE_CASE_ = complete_model(_A) 
SCREAMING_SNAKE_CASE_ = tf.keras.Model(_A , _A) keras_model.save(_A) def lowerCAmelCase__ ( self): # Has PT equivalent: this test relies on random sampling SCREAMING_SNAKE_CASE_ = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } SCREAMING_SNAKE_CASE_ = 14 SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 'Hello, my dog is cute and' SCREAMING_SNAKE_CASE_ = tokenizer(_A , return_tensors='tf') SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0'): tf.random.set_seed(0) SCREAMING_SNAKE_CASE_ = model.generate(**_A , eos_token_id=_A , **_A) self.assertTrue(expectation == len(generated_tokens[0])) SCREAMING_SNAKE_CASE_ = [638, 198] with tf.device(':/CPU:0'): tf.random.set_seed(0) SCREAMING_SNAKE_CASE_ = model.generate(**_A , eos_token_id=_A , **_A) self.assertTrue(expectation == len(generated_tokens[0])) def lowerCAmelCase__ ( self): # Has PT equivalent: ample use of framework-specific code SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE_ = 'Hugging Face is a technology company based in New York and Paris.' 
SCREAMING_SNAKE_CASE_ = bart_tokenizer(_A , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE_ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE_ = bart_model.generate(_A).numpy() class __snake_case ( lowerCAmelCase__ ): def lowerCAmelCase__ ( self , _A , _A=None , **_A): return super().call(_A , **_A) SCREAMING_SNAKE_CASE_ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE_ = bart_model.generate(_A , foo='bar').numpy() self.assertTrue(np.array_equal(_A , _A)) class __snake_case ( bart_model.model.encoder.__class__ ): def lowerCAmelCase__ ( self , _A , **_A): return super().call(_A , **_A) SCREAMING_SNAKE_CASE_ = FakeEncoder(bart_model.config , bart_model.model.shared) SCREAMING_SNAKE_CASE_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) SCREAMING_SNAKE_CASE_ = bart_model.generate(_A).numpy() with self.assertRaises(_A): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_A , foo='bar')
620
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
1
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    """Builds tiny DistilBert configs/inputs and checks output shapes per head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate the batch once per choice: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
620
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, length=10):
    """Worker for one element of the list being sorted.

    Runs ``length`` odd-even transposition phases; in each phase, depending on
    the parity of (phase + position), this process exchanges its value with
    its left or right neighbour over the given pipes and keeps the min (left
    side) or max (right side). ``length`` defaults to 10 for backward
    compatibility with the original hard-coded demo list size; callers
    should pass the actual list length.
    """
    global process_lock

    # we perform `length` swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, length):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` ascending with a parallel odd-even transposition sort.

    One process is spawned per element; neighbours exchange values through
    pipes for len(arr) phases. The list is overwritten with the sorted
    values and returned.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made
    # outside of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], len(arr)),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], len(arr)),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                len(arr),
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Sort a descending demo list and print it before and after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
620
1
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 50 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'{solution() = }')
620
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class __snake_case ( unittest.TestCase , lowerCAmelCase__ ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering') self.tool.setup() SCREAMING_SNAKE_CASE_ = load_tool('text-question-answering' , remote=_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(_A , 'What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?') self.assertEqual(_A , 'launched the BigScience Research Workshop')
620
1
from __future__ import annotations def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list[int] ): """simple docstring""" if not nums: return 0 SCREAMING_SNAKE_CASE_ = nums[0] SCREAMING_SNAKE_CASE_ = 0 for num in nums[1:]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ( max_excluding + num, max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
620
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ): SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20} SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean SCREAMING_SNAKE_CASE_ = image_std SCREAMING_SNAKE_CASE_ = do_reduce_labels def lowerCAmelCase__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] ) return image, map def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 
load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] ) SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self) @property def lowerCAmelCase__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , 'do_resize')) self.assertTrue(hasattr(_A , 'size')) self.assertTrue(hasattr(_A , 'do_center_crop')) self.assertTrue(hasattr(_A , 'center_crop')) self.assertTrue(hasattr(_A , 'do_normalize')) self.assertTrue(hasattr(_A , 'image_mean')) self.assertTrue(hasattr(_A , 'image_std')) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 20, 'width': 20}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) self.assertEqual(image_processor.do_reduce_labels , _A) SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) self.assertEqual(image_processor.do_reduce_labels , _A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) SCREAMING_SNAKE_CASE_ = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) # Test batched input (PIL images) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long) self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255) def lowerCAmelCase__ ( self): # Initialize image_processing SCREAMING_SNAKE_CASE_ 
= self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 150) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt') self.assertTrue(encoding['labels'].min().item() >= 0) self.assertTrue(encoding['labels'].max().item() <= 255)
620
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase__ : Optional[Any] = get_tests_dir("fixtures") UpperCamelCase__ : int = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCamelCase__ : Optional[Any] = get_tests_dir("fixtures/dummy-config.json") class __snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 0 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h') self.assertIsInstance(_A , _A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_A) self.assertIsInstance(_A , _A) def lowerCAmelCase__ ( self): with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_A).to_dict() config_dict.pop('feature_extractor_type') SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor(**_A) # save in new folder model_config.save_pretrained(_A) config.save_pretrained(_A) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_A) # make sure private variable is not incorrectly saved SCREAMING_SNAKE_CASE_ = json.loads(config.to_json_string()) self.assertTrue('_processor_class' not in dict_as_saved) self.assertIsInstance(_A , _A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 
AutoFeatureExtractor.from_pretrained(_A) self.assertIsInstance(_A , _A) def lowerCAmelCase__ ( self): with self.assertRaisesRegex( _A , 'bert-base is not a local folder and is not a valid model identifier'): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('bert-base') def lowerCAmelCase__ ( self): with self.assertRaisesRegex( _A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_A , revision='aaaaaa') def lowerCAmelCase__ ( self): with self.assertRaisesRegex( _A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model') def lowerCAmelCase__ ( self): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_A): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor') # If remote code is disabled, we can't load this config. with self.assertRaises(_A): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor') # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_A) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor') def lowerCAmelCase__ ( self): try: AutoConfig.register('custom' , _A) AutoFeatureExtractor.register(_A , _A) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A): AutoFeatureExtractor.register(_A , _A) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(_A) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_A) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_A) self.assertIsInstance(_A , _A) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self): class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : Union[str, Any] = True try: AutoConfig.register('custom' , _A) AutoFeatureExtractor.register(_A , _A) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor') self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor') self.assertTrue(feature_extractor.is_local) # If remote code is disabled, we load the local one. 
SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor') self.assertTrue(feature_extractor.is_local) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor') self.assertTrue(not hasattr(_A , 'is_local')) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
620
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE_ = [0] * (pence + 1) SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
620
1
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCamelCase__ : Optional[int] = logging.getLogger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' 
) SCREAMING_SNAKE_CASE_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' ) SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' 
) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' 
) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) SCREAMING_SNAKE_CASE_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = special_dtypes SCREAMING_SNAKE_CASE_ = no_split_module_classes SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = max_memory SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. 
Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if modules_to_not_convert is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) SCREAMING_SNAKE_CASE_ = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" with init_empty_weights(): SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) 
else: SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE_ = False if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ): SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE_ = list(model.named_children() ) SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE_ = ['.weight', '.bias'] SCREAMING_SNAKE_CASE_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = param_name SCREAMING_SNAKE_CASE_ = model if "." 
in tensor_name: SCREAMING_SNAKE_CASE_ = tensor_name.split('.' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) SCREAMING_SNAKE_CASE_ = new_module SCREAMING_SNAKE_CASE_ = splits[-1] # offload weights SCREAMING_SNAKE_CASE_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
620
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Solve the 0/1 knapsack problem by plain recursion.

    Args:
        weights: weight of each item.
        values: value of each item (parallel to ``weights``).
        number_of_items: total number of items (``len(weights)``).
        max_weight: remaining weight capacity.
        index: index of the item currently being considered.

    Returns:
        The maximum total value achievable from items ``index..number_of_items-1``
        without exceeding ``max_weight``.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans_without = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits in the remaining capacity.
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_without, ans_with)


# Backward-compatible alias for the (broken) obfuscated name used elsewhere.
_UpperCAmelCase = knapsack


if __name__ == "__main__":
    import doctest

    doctest.testmod()
620
1
# Lazy-import initializer for the Bloom model package.
#
# Fixes the broken obfuscated wiring: the original overwrote a single variable
# three times, passed an undefined `_import_structure` to `_LazyModule`, and
# never installed the lazy module into `sys.modules`, so importing the package
# raised NameError and lazy loading never took effect.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps submodule name -> public symbols it provides; extended below when
# optional dependencies are present.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # tokenizers not installed: fast tokenizer is simply unavailable
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch not installed: modeling classes are simply unavailable
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers and IDEs see the real symbols.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
620
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = torch.device("cpu") def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = val def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' 
, '.proj.' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ = k_new.split('.' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ = 1_000 SCREAMING_SNAKE_CASE_ = 'huggingface/label-files' SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ = idalabel SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , 
check_hash=_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) SCREAMING_SNAKE_CASE_ = checkpoint SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' ) SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
620
1
from typing import Dict, Optional import numpy as np import datasets UpperCamelCase__ : List[Any] = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n" UpperCamelCase__ : Tuple = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). 
The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n" UpperCamelCase__ : List[Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}" def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool , _SCREAMING_SNAKE_CASE : Optional[Dict[int, int]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): SCREAMING_SNAKE_CASE_ = new_id # turn into Numpy arrays SCREAMING_SNAKE_CASE_ = np.array(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = np.array(_SCREAMING_SNAKE_CASE ) if reduce_labels: SCREAMING_SNAKE_CASE_ = 255 SCREAMING_SNAKE_CASE_ = label - 1 SCREAMING_SNAKE_CASE_ = 255 SCREAMING_SNAKE_CASE_ = label != ignore_index SCREAMING_SNAKE_CASE_ = np.not_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = pred_label[mask] SCREAMING_SNAKE_CASE_ = np.array(_SCREAMING_SNAKE_CASE )[mask] SCREAMING_SNAKE_CASE_ = pred_label[pred_label == label] SCREAMING_SNAKE_CASE_ = np.histogram(_SCREAMING_SNAKE_CASE , bins=_SCREAMING_SNAKE_CASE , range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE_ = np.histogram(_SCREAMING_SNAKE_CASE , bins=_SCREAMING_SNAKE_CASE , range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE_ = np.histogram(_SCREAMING_SNAKE_CASE , bins=_SCREAMING_SNAKE_CASE , range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE_ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool , _SCREAMING_SNAKE_CASE : Optional[Dict[int, int]] = None , _SCREAMING_SNAKE_CASE : 
bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = intersect_and_union( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : bool , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[int, int]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_intersect_and_union( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # compute metrics SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = total_area_intersect.sum() / total_area_label.sum() SCREAMING_SNAKE_CASE_ = total_area_intersect / total_area_union SCREAMING_SNAKE_CASE_ = total_area_intersect / total_area_label SCREAMING_SNAKE_CASE_ = np.nanmean(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = np.nanmean(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = all_acc SCREAMING_SNAKE_CASE_ = iou SCREAMING_SNAKE_CASE_ = acc if nan_to_num is not None: SCREAMING_SNAKE_CASE_ = {metric: 
np.nan_to_num(_SCREAMING_SNAKE_CASE , nan=_SCREAMING_SNAKE_CASE ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCAmelCase__ ( self): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))), }) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A = None , _A = None , _A = False , ): SCREAMING_SNAKE_CASE_ = mean_iou( results=_A , gt_seg_maps=_A , num_labels=_A , ignore_index=_A , nan_to_num=_A , label_map=_A , reduce_labels=_A , ) return iou_result
620
def triangle_number_generator():
    """Yield triangle numbers T(n) = n * (n + 1) / 2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of ``n``.

    Uses trial-division prime factorisation: if n = p1^a1 * ... * pk^ak,
    the divisor count is (a1 + 1) * ... * (ak + 1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining cofactor is a prime with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
620
1
# Pytest configuration for the datasets test suite.
#
# Restores the pytest hook and fixture names that the obfuscation destroyed:
# hooks/fixtures are looked up by NAME, so `_UpperCAmelCase` definitions (which
# also shadowed each other and contained duplicate-parameter SyntaxErrors)
# silently disabled all of this machinery.
import pytest

import datasets

# Import fixture modules as plugins (`pytest_plugins` is a magic name pytest reads).
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """Mark every test that has neither `integration` nor `unit` as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """Register custom markers so `--strict-markers` does not reject them."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every datasets cache path into a per-session temp directory."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Keep test output clean: no progress bars for the whole session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't ping the Hub's download counters from tests.
    # NOTE(review): the obfuscated source passed an undefined name here; `False`
    # matches the upstream datasets conftest — confirm against it.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence sqlalchemy's 2.0 "uber warning" in tests that opt into this fixture.
    # NOTE(review): value was obfuscated in the source; `True` matches upstream — confirm.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCamelCase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None __lowerCAmelCase : str = "utf-8" __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : Optional[str] = None __lowerCAmelCase : bool = True # deprecated __lowerCAmelCase : Optional[int] = None # deprecated __lowerCAmelCase : int = 10 << 20 # 10MB __lowerCAmelCase : Optional[bool] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = JsonConfig def lowerCAmelCase__ ( self): if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') SCREAMING_SNAKE_CASE_ = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in 
data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(_A).type SCREAMING_SNAKE_CASE_ = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) # We keep only the field we are interested in SCREAMING_SNAKE_CASE_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} else: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10) SCREAMING_SNAKE_CASE_ = ( 
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: SCREAMING_SNAKE_CASE_ = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: SCREAMING_SNAKE_CASE_ = json.load(_A) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset]) SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys} SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""") raise ValueError( 
f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
620
1
# Package initializer for the Shap-E pipelines: import the real implementations
# when the optional `torch` + `transformers` dependencies are installed, and
# fall back to dummy placeholder objects otherwise.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,  # NOTE(review): not used in this chunk; possibly re-exported for consumers — confirm
)

try:
    # Probe the optional dependencies; raising sends us to the dummy fallback.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder class that raises a helpful ImportError only when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    # Real implementations: camera helper, the two pipelines, and the NeRF
    # renderer building blocks they rely on.
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
620
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if 
self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
620
1
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() ) @pytest.fixture def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" class __snake_case : def __init__( self , _A): SCREAMING_SNAKE_CASE_ = metric_id class __snake_case : __lowerCAmelCase : Optional[int] = [MetricMock(lowerCAmelCase__ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def lowerCAmelCase__ ( self): return self._metrics monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() ) @pytest.mark.parametrize( 'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" if "tmp_path" in args: SCREAMING_SNAKE_CASE_ = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args ) with pytest.warns(_SCREAMING_SNAKE_CASE , match='https://huggingface.co/docs/evaluate' ): func(*_SCREAMING_SNAKE_CASE )
620
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : torch.FloatTensor class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (64,) , _A = 1 , _A = "silu" , _A = 3 , _A = 32 , _A = 256 , _A = 32 , _A = None , _A = 0.1_8_2_1_5 , _A = "group" , ): super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE_ = Encoder( in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , ) SCREAMING_SNAKE_CASE_ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) SCREAMING_SNAKE_CASE_ = VectorQuantizer(_A , _A , beta=0.2_5 , remap=_A , sane_index_shape=_A) SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1) # pass init params to Decoder SCREAMING_SNAKE_CASE_ = Decoder( in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , ) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = self.encoder(_A) SCREAMING_SNAKE_CASE_ = self.quant_conv(_A) if not return_dict: return (h,) return VQEncoderOutput(latents=_A) @apply_forward_hook def lowerCAmelCase__ ( self , _A , _A = False , _A = True): # also go through quantization layer if not force_not_quantize: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.quantize(_A) else: SCREAMING_SNAKE_CASE_ = h SCREAMING_SNAKE_CASE_ = self.post_quant_conv(_A) 
SCREAMING_SNAKE_CASE_ = self.decoder(_A , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=_A) def lowerCAmelCase__ ( self , _A , _A = True): SCREAMING_SNAKE_CASE_ = sample SCREAMING_SNAKE_CASE_ = self.encode(_A).latents SCREAMING_SNAKE_CASE_ = self.decode(_A).sample if not return_dict: return (dec,) return DecoderOutput(sample=_A)
620
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase__ : int = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm 
import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys UpperCamelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
620
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCamelCase__ : Optional[int] = logging.getLogger(__name__) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' 
) SCREAMING_SNAKE_CASE_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = load_in_abit SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' ) SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' 
) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' 
) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) SCREAMING_SNAKE_CASE_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = special_dtypes SCREAMING_SNAKE_CASE_ = no_split_module_classes SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = max_memory SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. 
Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): """simple docstring""" if modules_to_not_convert is None: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) SCREAMING_SNAKE_CASE_ = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" with init_empty_weights(): SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) 
else: SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] ) SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE_ = False if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ): SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE_ = list(model.named_children() ) SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE_ = ['.weight', '.bias'] SCREAMING_SNAKE_CASE_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = param_name SCREAMING_SNAKE_CASE_ = model if "." 
in tensor_name: SCREAMING_SNAKE_CASE_ = tensor_name.split('.' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) SCREAMING_SNAKE_CASE_ = new_module SCREAMING_SNAKE_CASE_ = splits[-1] # offload weights SCREAMING_SNAKE_CASE_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
620
1
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class __snake_case ( datasets.BuilderConfig ): __lowerCAmelCase : Optional[datasets.Features] = None class __snake_case ( datasets.ArrowBasedBuilder ): __lowerCAmelCase : int = PandasConfig def lowerCAmelCase__ ( self): return datasets.DatasetInfo(features=self.config.features) def lowerCAmelCase__ ( self , _A): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] SCREAMING_SNAKE_CASE_ = [] for split_name, files in data_files.items(): if isinstance(_A , _A): SCREAMING_SNAKE_CASE_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def lowerCAmelCase__ ( self , _A): if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema) return pa_table def lowerCAmelCase__ ( self , _A): for i, file in enumerate(itertools.chain.from_iterable(_A)): with open(_A , 'rb') as f: SCREAMING_SNAKE_CASE_ = pa.Table.from_pandas(pd.read_pickle(_A)) yield i, self._cast_table(_A)
620
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } UpperCamelCase__ : str = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ = bs[:] SCREAMING_SNAKE_CASE_ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = set() SCREAMING_SNAKE_CASE_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ = char return pairs class __snake_case ( lowerCAmelCase__ ): __lowerCAmelCase : str = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='utf-8') as vocab_handle: SCREAMING_SNAKE_CASE_ = json.load(_A) SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ = bytes_to_unicode() SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='utf-8') as merges_handle: SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1] SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A)))) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def lowerCAmelCase__ ( self): return len(self.encoder) def lowerCAmelCase__ ( self): return dict(self.encoder , **self.added_tokens_encoder) def lowerCAmelCase__ ( self , _A): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = get_pairs(_A) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf'))) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 0 while i < len(_A): try: SCREAMING_SNAKE_CASE_ = word.index(_A , _A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) SCREAMING_SNAKE_CASE_ = j if word[i] == first and i < len(_A) - 1 and word[i + 1] == second: 
new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 SCREAMING_SNAKE_CASE_ = tuple(_A) SCREAMING_SNAKE_CASE_ = new_word if len(_A) == 1: break else: SCREAMING_SNAKE_CASE_ = get_pairs(_A) SCREAMING_SNAKE_CASE_ = ' '.join(_A) SCREAMING_SNAKE_CASE_ = word return word def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = [] for token in re.findall(self.pat , _A): SCREAMING_SNAKE_CASE_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' ')) return bpe_tokens def lowerCAmelCase__ ( self , _A): return self.encoder.get(_A , self.encoder.get(self.unk_token)) def lowerCAmelCase__ ( self , _A): return self.decoder.get(_A) def lowerCAmelCase__ ( self , _A): SCREAMING_SNAKE_CASE_ = ''.join(_A) SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def lowerCAmelCase__ ( self , _A , _A = None): if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) SCREAMING_SNAKE_CASE_ = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(_A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n') SCREAMING_SNAKE_CASE_ = 0 with open(_A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!') SCREAMING_SNAKE_CASE_ = token_index writer.write(' '.join(_A) + 
'\n') index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , _A , _A = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self , _A , _A = None , _A = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1] def lowerCAmelCase__ ( self , _A , _A = None): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCAmelCase__ ( self , _A , _A=False , **_A): SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ = ' ' + text return (text, kwargs)
620
1
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class __snake_case : def __init__( self , _A , _A = 13 , _A = 64 , _A = 2 , _A = 3 , _A = 3 , _A = True , _A = True , _A = 128 , _A=[16, 32, 64, 128] , _A = 7 , _A = 4 , _A = 37 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 10 , _A = 0.0_2 , _A = 2 , _A = 1 , _A = 128 , _A = [2, 2, 2, 2] , _A = 2 , _A = 2 , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = encoder_stride SCREAMING_SNAKE_CASE_ = num_attention_outputs SCREAMING_SNAKE_CASE_ = embed_dim SCREAMING_SNAKE_CASE_ = embed_dim + 1 
SCREAMING_SNAKE_CASE_ = resolution SCREAMING_SNAKE_CASE_ = depths SCREAMING_SNAKE_CASE_ = hidden_sizes SCREAMING_SNAKE_CASE_ = dim SCREAMING_SNAKE_CASE_ = mlp_expansion_ratio def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self): return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = TFEfficientFormerModel(config=_A) SCREAMING_SNAKE_CASE_ = model(_A , training=_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase__ ( self , _A , _A , _A): SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification(_A) SCREAMING_SNAKE_CASE_ = model(_A , labels=_A , training=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification(_A) SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, 
self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ = model(_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : int = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __lowerCAmelCase : Any = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __lowerCAmelCase : int = False __lowerCAmelCase : int = False __lowerCAmelCase : int = False __lowerCAmelCase : Any = False __lowerCAmelCase : Union[str, Any] = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TFEfficientFormerModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester( self , config_class=_A , has_text_modality=_A , hidden_size=37) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def lowerCAmelCase__ ( self): pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings') def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(_A) SCREAMING_SNAKE_CASE_ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] 
SCREAMING_SNAKE_CASE_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A) def lowerCAmelCase__ ( self): def check_hidden_states_output(_A , _A , _A): SCREAMING_SNAKE_CASE_ = model_class(_A) SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A) , training=_A) SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE_ = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_A) , _A) if hasattr(self.model_tester , 'encoder_seq_length'): SCREAMING_SNAKE_CASE_ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: SCREAMING_SNAKE_CASE_ = seq_length * self.model_tester.chunk_length else: SCREAMING_SNAKE_CASE_ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: SCREAMING_SNAKE_CASE_ = outputs.decoder_hidden_states self.asseretIsInstance(_A , (list, tuple)) self.assertEqual(len(_A) , _A) SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'seq_length' , _A) SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'decoder_seq_length' , _A) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(_A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(_A , _A , _A) def lowerCAmelCase__ ( self , _A , _A , _A=False): SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(_A , _A , return_labels=_A) if return_labels: if model_class.__name__ == 
"TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) @slow def lowerCAmelCase__ ( self): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = TFEfficientFormerModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'seq_length' , _A) SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'encoder_seq_length' , _A) SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'key_length' , _A) SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'chunk_length' , _A) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): SCREAMING_SNAKE_CASE_ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(_A) SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A) , training=_A) SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_A) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(_A) SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A) , training=_A) SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_A) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase__ ( self): # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model SCREAMING_SNAKE_CASE_ = model_class(_A) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes SCREAMING_SNAKE_CASE_ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_A) for key, val in model.input_signature.items() if key in model.dummy_inputs } SCREAMING_SNAKE_CASE_ = model(_A) self.assertTrue(outputs_dict is not None) def _UpperCAmelCase ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __snake_case ( unittest.TestCase ): @cached_property def lowerCAmelCase__ ( self): return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 
TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='tf') # forward pass SCREAMING_SNAKE_CASE_ = model(**_A , training=_A) # verify the logits SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape , _A) SCREAMING_SNAKE_CASE_ = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2]) self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4)) @slow def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='tf') # forward pass SCREAMING_SNAKE_CASE_ = model(**_A , training=_A) # verify the logits SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape , _A) SCREAMING_SNAKE_CASE_ = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9]) self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4))
620
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of canonical DPR checkpoint names to their hosted config files.
# NOTE(review): kept the original (obfuscated) public name so any external
# reference to this module attribute keeps working.
UpperCamelCase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class __snake_case(PretrainedConfig):
    """Configuration for DPR encoder/reader models.

    Holds BERT-style hyper-parameters plus ``projection_dim`` (an extra linear
    projection on top of the encoder output; 0 disables it).

    Fixes vs. the previous revision:
    - ``__init__`` used the same parameter name (``_A``) for every argument,
      which is a ``SyntaxError`` (duplicate argument in function definition);
      parameter names were reconstructed from the body's assignment order.
    - the base class was the undefined name ``lowerCAmelCase__``; the module
      already imports ``PretrainedConfig``, which is now used.
    - class/module-level ``Optional[int]`` annotations referenced ``typing``
      names that were never imported (NameError at import time); removed.
    """

    # NOTE(review): upstream calls this attribute ``model_type``; the obfuscated
    # name is kept to avoid changing the visible interface — confirm before renaming.
    __lowerCAmelCase = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
620
1
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so module-level references to ``Image`` resolve when PIL is absent.

        The previous revision named this fallback ``__snake_case``, so
        ``Image.open`` / ``Image.Image`` below raised ``NameError`` whenever
        vision extras were missing.
        """

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    """Return the MD5 hex digest of an image's raw bytes (fingerprint for outputs).

    BUGFIX: was ``hashlib.mda(...)``, which does not exist (AttributeError at
    call time); the intent — see the commented-out digest assertion below — is MD5.
    Also restored the name ``hashimage``, which ``test_large_model_pt`` calls.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    # Read by the pipeline-test framework to enumerate candidate model classes.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Build a pipeline for the framework-driven tests; the tokenizer is
        # unused for image pipelines but part of the hook's signature.
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        # Single image in, single dict out.
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        # Batched call mixing a PIL image, a URL, and RGBA/LA/L fixture files.
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
620
import pytest

import datasets


# Import fixture modules as plugins.
# BUGFIX: pytest only honors this list under the reserved name ``pytest_plugins``;
# the previous revision bound it to an arbitrary variable, so none of the
# fixture modules were ever loaded.
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """Mark every test without an explicit ``integration``/``unit`` marker as a unit test.

    BUGFIX: pytest hooks are discovered by their reserved names; the previous
    revision's obfuscated function names meant this hook (and ``pytest_configure``
    below) never ran.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    # Register the custom marker so ``--strict-markers`` runs don't fail.
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Point every datasets cache directory at a per-session temp dir.

    BUGFIX: ``autouse`` was passed an undefined name (NameError at collection);
    autouse=True matches the fixture's evident intent of applying everywhere.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    # Keep CI logs quiet for the whole session.
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't count test runs toward hub download statistics.
    # NOTE(review): the original value was the undefined obfuscation token;
    # False matches the fixture's purpose — confirm against upstream conftest.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence SQLAlchemy 2.0 deprecation "uber warning" during sql tests.
    # NOTE(review): original value was the undefined obfuscation token; True
    # (silence enabled) matches the fixture name — confirm against upstream.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
620
1
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class __snake_case : def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=[1, 1, 2] , _A=1 , _A=32 , _A=4 , _A=8 , _A=37 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=0.0 , _A=512 , _A=3 , _A=0.0_2 , _A=3 , _A=4 , _A=None , _A=False , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = block_sizes SCREAMING_SNAKE_CASE_ = num_decoder_layers SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = n_head SCREAMING_SNAKE_CASE_ = d_head SCREAMING_SNAKE_CASE_ = d_inner SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = initializer_std # Used in the tests to check the size of the first attention layer SCREAMING_SNAKE_CASE_ = n_head # Used in the tests to check the size of the first hidden state SCREAMING_SNAKE_CASE_ = 
self.d_model # Used in the tests to check the number of output hidden states/attentions SCREAMING_SNAKE_CASE_ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: SCREAMING_SNAKE_CASE_ = self.num_hidden_layers + 2 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = TFFunnelModel(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 
'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = [input_ids, input_mask] SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = TFFunnelModel(config=_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = TFFunnelModel(config=_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = TFFunnelBaseModel(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = [input_ids, input_mask] SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = TFFunnelBaseModel(config=_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model)) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = TFFunnelBaseModel(config=_A) SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = TFFunnelForPreTraining(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, 
self.seq_length)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = TFFunnelForMaskedLM(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = TFFunnelForSequenceClassification(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = self.num_choices SCREAMING_SNAKE_CASE_ = TFFunnelForMultipleChoice(config=_A) SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1) , (1, self.num_choices, 1)) SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1) , (1, self.num_choices, 1)) SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1) , (1, self.num_choices, 1)) SCREAMING_SNAKE_CASE_ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = TFFunnelForTokenClassification(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A , _A , ): 
SCREAMING_SNAKE_CASE_ = TFFunnelForQuestionAnswering(config=_A) SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} SCREAMING_SNAKE_CASE_ = model(_A) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[int] = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __lowerCAmelCase : Union[str, Any] = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __lowerCAmelCase : str = False __lowerCAmelCase : Union[str, Any] = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TFFunnelModelTester(self) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A) @require_tf class __snake_case ( lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : List[str] = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __lowerCAmelCase : Any = False __lowerCAmelCase : Any = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TFFunnelModelTester(self , base=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A)
620
from typing import List import numpy as np def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 ) return max(1 , _SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for group_idx in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add ) shards_indices_per_group.append(_SCREAMING_SNAKE_CASE ) return shards_indices_per_group def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE ) if num_shards == 1: return [dict(_SCREAMING_SNAKE_CASE )] else: SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(_SCREAMING_SNAKE_CASE ) ) ] def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} SCREAMING_SNAKE_CASE_ = {} for size in list_sizes: SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE ) for key, value in shuffled_kwargs.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]] return shuffled_kwargs
620
1